From e2e0f9a64f0900267fabe57a08dd9ee1467ea380 Mon Sep 17 00:00:00 2001 From: sneakers-the-rat Date: Tue, 28 Nov 2023 07:26:49 +0000 Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20p2p-ld/d?= =?UTF-8?q?ocs@318562a519b4b7a309a7bfe3918cfab81b131f2e=20=F0=9F=9A=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .buildinfo | 2 +- _sources/comparison/data/graphdb.md.txt | 29 + _sources/comparison/data/index.md.txt | 6 +- _sources/comparison/data/sqlite.md.txt | 159 +++++ _sources/comparison/index.md.txt | 9 + _sources/comparison/ld/index.md.txt | 6 +- _sources/comparison/p2p/bittorrent.md.txt | 2 +- _sources/comparison/social/at_protocol.md.txt | 6 + _sources/encryption.md.txt | 2 +- _sources/index.md.txt | 10 +- _sources/todo.md.txt | 4 + backwards_compatibility.html | 38 +- codecs/hdf5.html | 10 +- codecs/index.html | 10 +- comparison/data/datalad.html | 10 +- comparison/data/dmc.html | 10 +- comparison/data/eris.html | 16 +- comparison/data/graphdb.html | 396 +++++++++++++ comparison/data/index.html | 13 +- comparison/data/sqlite.html | 558 ++++++++++++++++++ comparison/index.html | 28 +- comparison/ld/hdt.html | 10 +- comparison/ld/index.html | 41 +- comparison/ld/ld_fragments.html | 10 +- comparison/ld/ld_platform.html | 10 +- comparison/ld/nanopubs.html | 10 +- comparison/ld/rdf.html | 10 +- comparison/ld/solid.html | 10 +- comparison/p2p/bittorrent.html | 26 +- comparison/p2p/hypercore.html | 10 +- comparison/p2p/index.html | 10 +- comparison/p2p/ipfs.html | 10 +- comparison/p2p/spritely.html | 10 +- comparison/social/activitypub.html | 10 +- comparison/social/at_protocol.html | 15 +- comparison/social/index.html | 10 +- comparison/social/matrix.html | 10 +- comparison/social/nostr.html | 10 +- comparison/social/ssb.html | 10 +- comparison/social/xmpp.html | 10 +- data_structures.html | 10 +- definitions.html | 10 +- design.html | 10 +- discovery.html | 10 +- encryption.html | 43 +- evolvability.html | 18 +- federation.html | 28 +- genindex.html | 91 ++- identity.html | 10 +- index.html | 20 +- objects.inv | Bin 1130 -> 1179 bytes out_of_scope.html | 10 +- overview.html | 10 +- p2p_concepts.html | 16 +- protocol.html | 10 +- querying.html | 10 +- references.html | 22 +- search.html | 10 +- searchindex.js | 2 +- sketchpad.html | 10 +- todo.html | 329 +++++++++++ translation/index.html | 10 +- triplets.html | 14 +- vocabulary.html | 10 +- 64 files changed, 1995 insertions(+), 284 deletions(-) create mode 100644 _sources/comparison/data/graphdb.md.txt create mode 100644 _sources/comparison/data/sqlite.md.txt create mode 100644 _sources/todo.md.txt create mode 100644 comparison/data/graphdb.html create mode 100644 comparison/data/sqlite.html create mode 100644 todo.html diff --git a/.buildinfo b/.buildinfo index 6eba67e..ebfde2e 100644 --- a/.buildinfo +++ b/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
-config: f2f18cd765304450379671f4a8b4169f +config: fe80451c36b7469d0347596cb9814e3a tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/_sources/comparison/data/graphdb.md.txt b/_sources/comparison/data/graphdb.md.txt new file mode 100644 index 0000000..dfdbddb --- /dev/null +++ b/_sources/comparison/data/graphdb.md.txt @@ -0,0 +1,29 @@ +# Graph Databases + +```{index} see: Triple Store; Graph Database + +``` +```{index} Database Engine; Graph Database + +``` + + +({index}`Graph Database`s and {index}`Triple Store`s) + +## Options + +```{table} Graph Databases +:width: 100% + +| DB | SPARQL? | Language | Description | +| -- | ------- | -------- | ----------- | +| [{index}`Oxigraph `](https://github.com/oxigraph/oxigraph) | Y | Rust, Python, JS | "Trying to do SQLite for graph dbs" | +| {index}`Blazegraph ` | | | | +| {index}`GraphDB ` | | | | +| {index}`Jena ` | | | | +| {index}`Virtuoso ` | | | | +``` + +## TODO + +- What in the heck is {index}`SOLID` using? \ No newline at end of file diff --git a/_sources/comparison/data/index.md.txt b/_sources/comparison/data/index.md.txt index a51e3c7..2755dec 100644 --- a/_sources/comparison/data/index.md.txt +++ b/_sources/comparison/data/index.md.txt @@ -7,4 +7,8 @@ datalad dmc eris -``` \ No newline at end of file +graphdb +sqlite +``` + +AND SEE https://github.com/bonfire-networks/pointers for a data model re this thread: https://social.treehouse.systems/@jubilee/110665600584252989 \ No newline at end of file diff --git a/_sources/comparison/data/sqlite.md.txt b/_sources/comparison/data/sqlite.md.txt new file mode 100644 index 0000000..0b94adc --- /dev/null +++ b/_sources/comparison/data/sqlite.md.txt @@ -0,0 +1,159 @@ +# SQLite + +```{index} Database Engine; RDBMS +``` +```{index} RDBMS; SQLite +``` + +We want something like sqlite, but for {index}`Graph Database`s! + +Most of the existing triple stores and graph databases are very heavyweight services that would be impractical for packaging in a portable daemon in the same way that sqlite works. Maybe we can learn from how sqlite works and do something similar for graph databases? + +Questions: + +- How come these things can be faster than idk like a .json file +- How are they different architecturally than a traditional SQL server + +## File Structure + +- Main file +- Rollback Journal - stores additional information to restore in case of a crash. Store a copy of the original DB, write changes directly into DB file. COMMIT occurs when rollback is deleted +- Write-ahead Log - if in [WAL mode](https://www.sqlite.org/wal.html), append updates to WAL file. COMMIT occurs when writing to WAL file (not to main DB). Multiple transactions can be batched. + +### Pages + +Pages are the basic unit of an sqlite file. + +Numeracy: + +- Each page can be a power of 2 between 512 and 65536 +- All pages are the same size +- Max `2^32 - 2` pages in a single DB. 
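As a concrete illustration of the page bookkeeping above: the page size and page count are recorded directly in the 100-byte database header, so they can be read without any SQL machinery. A minimal Python sketch (not part of this document's protocol; the field offsets follow the SQLite file-format document, and `example.db` is a placeholder path):

```python
# Minimal sketch: read the 100-byte header of an SQLite database and pull out
# the page-size / page-count fields described above. Offsets follow the
# SQLite file format document (https://www.sqlite.org/fileformat.html);
# "example.db" is just a placeholder path.
import struct


def read_sqlite_header(path: str) -> dict:
    with open(path, "rb") as f:
        header = f.read(100)  # the database header is always the first 100 bytes

    if header[:16] != b"SQLite format 3\x00":
        raise ValueError("not an SQLite 3 database file")

    # Offset 16: 2-byte big-endian page size; the special value 1 means 65536.
    (page_size,) = struct.unpack(">H", header[16:18])
    if page_size == 1:
        page_size = 65536

    # Offset 24: file change counter; offset 28: database size in pages.
    # (The in-header size is only authoritative when the version-valid-for
    # number at offset 92 matches the change counter.)
    change_counter, page_count = struct.unpack(">II", header[24:32])

    return {
        "page_size": page_size,        # a power of two, 512..65536
        "page_count": page_count,      # at most 2**32 - 2
        "change_counter": change_counter,
        "approx_file_size": page_size * page_count,
    }


if __name__ == "__main__":
    print(read_sqlite_header("example.db"))
```

Run against any SQLite 3 file, this should report a page size that is a power of two between 512 and 65536 and a page count no greater than `2^32 - 2`.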
+ + +#### Types + +Each page has a single type: + + +> - The lock-byte page +> - A freelist page +> - A freelist trunk page +> - A freelist leaf page +> - A b-tree page +> - A table b-tree interior page +> - A table b-tree leaf page +> - An index b-tree interior page +> - An index b-tree leaf page +> - A payload overflow page +> - A pointer map page + +##### Lock-byte + +(artifact of windows 95 compatibility) + +##### Freelist + +Linked list of "trunks and leaves" to keep track of unused pages: +- Trunk pages: + - Series of 4-byte integers that take up full page + - First integer is the page number of the next trunk (zero if it's the last page) + - Second integer is number of leaf pointers that follow +- Leaf pages: + - contain nothing! + +##### {index}`B-tree` + +([B-tree wiki page](https://en.wikipedia.org/wiki/B-tree)) + +Two types of b-trees: table and index + +- **Table B-Trees**: + - One table b-tree in the db file for each `rowid` table in the database schema + - 64-bit signed integer key that refers to the `rowid` it implements + - Store all data in leaves (interior pages just point to leaves) + - +- **Index B-Trees**: + - One index b-tree for each index in the schema + - Arbitrary keys + - Store no data. + +Two types of b-tree pages: +- **Interior** +- **Leaf** + +```{todo} +Describe freeblocks +``` + +#### Payload Overflow + +> Define the "payload" of a cell to be the arbitrary length section of the cell. +> - For an index b-tree, the key is always arbitrary in length and hence the payload is the key. +> - There are no arbitrary length elements in the cells of interior table b-tree pages and so those cells have no payload. +> - Table b-tree leaf pages contain arbitrary length content and so for cells on those pages the payload is the content. + +When a payload is bigger than some threshold[^overflowthreshold], store it on a linked list of payload overload pages. The first four bytes of each overflow page are a 4-byte big-endian integer indicating the page number of the next page in the chain, or zero for the final page. + +[^overflowthreshold]: > The overflow thresholds are designed to give a minimum fanout of 4 for index b-trees and to make sure enough of the payload is on the b-tree page that the record header can usually be accessed without consulting an overflow page. In hindsight, the designer of the SQLite b-tree logic realized that these thresholds could have been made much simpler. However, the computations cannot be changed without resulting in an incompatible file format. And the current computations work well, even if they are a little complex. + +#### Pointer Maps + +Backlinks from child to parent nodes in index trees to assist with vacuuming :) + +Each pointermap page provides backlinks for the pages immediately following it. + +Each 5-byte ptrmap entry consists of: + +- 1 byte of page type information: + - `0`: A b-tree root page + - `0`: Freelist page + - `prior page` or `first page`: payload overflow page + - `parent page`: non-root b-tree page +- 4 byte big-endian page number + + +### Header + +(Add header info here as the rest of the spec makes it relevant) + +https://www.sqlite.org/fileformat.html#the_database_header + +Useful properties +- Magic header string makes it easy to identify sqlite files +- File change counter & schema cookie - 4-byte integer that increments whenever the db file is unlocked. 
useful for cache invalidation +- `version-valid-for-number` - stores the version of the software that most recently modified it, and the change counter at that modification. Useful for detecting if certain behaviors like updating the in-header db size are behaving correctly by knowing what version made a given change. + +## Schema + +### Records + +### Tables + +### Indices + +## I/O + +```{todo} +**How does writing and querying an sqlite file actually work???** +``` + +All reads from and writes to the main database file happen at a page boundary. + +All writes are an integer number of pages in size. + +Most reads are also an integer number of pages in size, except opening the database which reads the header (first 100 bytes). + + + + +## See also + +- [Graph Databases](graphdb) + +## References + +- [SQLite File Format](https://www.sqlite.org/fileformat.html) +- [SQLite Quirks](https://www.sqlite.org/quirks.html) - useful for understanding some design decisions +- [Customization and Porting](https://www.sqlite.org/custombuild.html) +- [SQLite Architecture](https://www.sqlite.org/arch.html) \ No newline at end of file diff --git a/_sources/comparison/index.md.txt b/_sources/comparison/index.md.txt index 8b5043b..227f910 100644 --- a/_sources/comparison/index.md.txt +++ b/_sources/comparison/index.md.txt @@ -15,6 +15,7 @@ data/index ## To be categorized +- [CozoDB](https://docs.cozodb.org/en/latest/releases/v0.6.html#experience-cozodb-the-hybrid-relational-graph-vector-database-the-hippocampus-for-llms) - uh i think this is the database we needed... - Agregore - Arweave - CAN @@ -30,6 +31,14 @@ data/index - chunks stored by nodes close in hash space - Repute.Social - LinkedTrust.us +- https://ganarchy.github.io/ - pull request-less git + +## See also + +- https://gitlab.com/bluesky-community1/decentralized-ecosystem/-/blob/master/README.md +- https://dsocialcommons.org/ +- https://openengiadina.codeberg.page/rdf-cbor/ - RDF/CBOR graph serialization] + - https://openengiadina.codeberg.page/rdf-cbor/content-addressable-rdf-v0.1.html ## Points of comparison diff --git a/_sources/comparison/ld/index.md.txt b/_sources/comparison/ld/index.md.txt index 722fd27..b6ee2f0 100644 --- a/_sources/comparison/ld/index.md.txt +++ b/_sources/comparison/ld/index.md.txt @@ -17,4 +17,8 @@ Linked data was born to be p2p. Many of the [initial, lofty visions](https://jon Don't just take my word for it tho: {attribution="A more decentralized vision for Linked Data. Polleres et al. (2020)"} -> So, where does this leave us? We have seen a lot of resources being put into publishing Linked Data, but yet a publicly widely visible “killer app” is still missing. The reason for this, in the opinion and experiences of the authors, lies all to often in the frustrating experiences when trying to actually use Linked Data for building actual applications. Many attempts and projects end up still using a centralized warehousing approach, integrating a handful of data sets directly from their raw data sources, rather than being able to leverage their “lifted” Linked Data versions: the use and benefits of RDF and Linked Data over conventional databases and warehouses technologies, where more trained people are available, remain questionable. {cite}`polleresMoreDecentralizedVision2020` \ No newline at end of file +> So, where does this leave us? We have seen a lot of resources being put into publishing Linked Data, but yet a publicly widely visible “killer app” is still missing. 
The reason for this, in the opinion and experiences of the authors, lies all to often in the frustrating experiences when trying to actually use Linked Data for building actual applications. Many attempts and projects end up still using a centralized warehousing approach, integrating a handful of data sets directly from their raw data sources, rather than being able to leverage their “lifted” Linked Data versions: the use and benefits of RDF and Linked Data over conventional databases and warehouses technologies, where more trained people are available, remain questionable. {cite}`polleresMoreDecentralizedVision2020` + +## TODO + +- https://layeredschemas.org/ \ No newline at end of file diff --git a/_sources/comparison/p2p/bittorrent.md.txt b/_sources/comparison/p2p/bittorrent.md.txt index 8d393b6..b9dba72 100644 --- a/_sources/comparison/p2p/bittorrent.md.txt +++ b/_sources/comparison/p2p/bittorrent.md.txt @@ -43,7 +43,7 @@ For example, a directory of three random files has a (decoded) `.torrent` file t } ``` -The contents of a torrent file are then uniquely indexed by the `infohash`, which is the hash of the entire (bencoded) `info` dictionary. {key}`Magnet Links ` are an abbreviated form of the `.torrent` file that contain only the info-hash, which allows downloading peers to request and independently verify the rest of the info dictionary and start downloading without a complete `.torrent`. +The contents of a torrent file are then uniquely indexed by the `infohash`, which is the hash of the entire (bencoded) `info` dictionary. {index}`Magnet Links ` are an abbreviated form of the `.torrent` file that contain only the info-hash, which allows downloading peers to request and independently verify the rest of the info dictionary and start downloading without a complete `.torrent`. A generic magnet link looks like: diff --git a/_sources/comparison/social/at_protocol.md.txt b/_sources/comparison/social/at_protocol.md.txt index 46fea8b..6a9b852 100644 --- a/_sources/comparison/social/at_protocol.md.txt +++ b/_sources/comparison/social/at_protocol.md.txt @@ -13,6 +13,12 @@ Specifically, AT protocol differentiates between *handles* and *identities*, whe That's about it, the rest of the handling of DID's is extremely centralized (see [did:plc](https://atproto.com/specs/did-plc) which requires resolution against a single domain), and the requirement of all posts to be funneled through [Big Graph Services](https://blueskyweb.xyz/blog/5-5-2023-federation-architecture) rather than directly peer to peer is transparently designed to ensure a marketing and advertising layer in between actors in the network. +```{note} +Lexicons were based on RDF? + +https://gist.github.com/pfrazee/0c51dc1afceac83d984ebfd555fe6340 +``` + ## Lessons diff --git a/_sources/encryption.md.txt b/_sources/encryption.md.txt index 65cd0f7..2176866 100644 --- a/_sources/encryption.md.txt +++ b/_sources/encryption.md.txt @@ -3,6 +3,6 @@ How can we make it possible to have a protocol that is "open" when it is intended to, but also protects privacy and consent when we need it to? 
-# TODO +## TODO - https://en.wikipedia.org/wiki/OMEMO \ No newline at end of file diff --git a/_sources/index.md.txt b/_sources/index.md.txt index 1ab8d49..e143c3e 100644 --- a/_sources/index.md.txt +++ b/_sources/index.md.txt @@ -6,6 +6,8 @@ This site describes the implementation of the p2p linked data protocol in {cite} ## Document Status +**23-11-27** - Back at it again after some digressions into [chatbridge](https://git.jon-e.net/jonny/chatbridge) and [nwb-linkml](https://github.com/p2p-ld/nwb-linkml/) - gathering more information on storage and interchange formats for databases and triple stores before trying to prop up the first peers sharing graphs of NWB data. Still mostly populating the [Comparison](comparison) section as I take notes and before I restructure these docs. + **23-06-08** - Populating the [Comparison](comparison) section first to refresh myself on other projects, and starting to sketch diagrammatically in [Sketchpad](sketchpad). The rest of the pages are just stubs to keep track of ideas before fleshing them out. ```{toctree} @@ -59,11 +61,5 @@ sketchpad genindex references +todo ``` - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/_sources/todo.md.txt b/_sources/todo.md.txt new file mode 100644 index 0000000..da28a24 --- /dev/null +++ b/_sources/todo.md.txt @@ -0,0 +1,4 @@ +# TODO + +```{todolist} +``` \ No newline at end of file