From dbb397f96652c665e74114e3011e1ad4b7975cd8 Mon Sep 17 00:00:00 2001 From: Niko PLP Date: Sat, 16 Mar 2024 05:34:49 +0200 Subject: [PATCH] refactor commits, permissions, capabilities, signature --- Cargo.lock | 190 ++++++++ ng-wallet/src/lib.rs | 9 +- ng-wallet/src/types.rs | 20 +- ngd/src/cli.rs | 2 +- ngd/src/main.rs | 6 +- p2p-net/src/actors/start.rs | 8 +- p2p-net/src/types.rs | 547 ++++++++++++++------- p2p-repo/Cargo.toml | 2 + p2p-repo/src/block.rs | 45 +- p2p-repo/src/branch.rs | 113 ++--- p2p-repo/src/commit.rs | 511 ++++++++++++++++---- p2p-repo/src/object.rs | 283 +++++++---- p2p-repo/src/repo.rs | 47 +- p2p-repo/src/site.rs | 20 +- p2p-repo/src/store.rs | 11 +- p2p-repo/src/types.rs | 783 +++++++++++++++++++++++-------- p2p-repo/src/utils.rs | 2 +- stores-lmdb/src/repo_store.rs | 4 +- stores-rocksdb/src/repo_store.rs | 4 +- 19 files changed, 1931 insertions(+), 676 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c47beb8..ee0e6a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12,6 +12,15 @@ dependencies = [ "psl-types", ] +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + [[package]] name = "adler" version = "1.0.2" @@ -487,6 +496,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + [[package]] name = "base64" version = "0.13.1" @@ -1552,6 +1576,28 @@ dependencies = [ "zune-inflate", ] +[[package]] +name = "failure" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" +dependencies = [ + "backtrace", + "failure_derive", +] + +[[package]] +name = "failure_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure", +] + [[package]] name = "fastbloom-rs" version = "0.5.3" @@ -1588,6 +1634,31 @@ dependencies = [ "simd-adler32", ] +[[package]] +name = "ff" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4b967a3ee6ae993f0094174257d404a5818f58be79d67a1aea1ec8996d28906" +dependencies = [ + "byteorder", + "ff_derive", + "rand_core 0.5.1", +] + +[[package]] +name = "ff_derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3776aaf60a45037a9c3cabdd8542b38693acaa3e241ff957181b72579d29feb" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "fiat-crypto" version = "0.1.20" @@ -1971,6 +2042,12 @@ dependencies = [ "weezl", ] +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + [[package]] name = "gio" version = "0.16.7" @@ -2093,6 +2170,17 @@ dependencies = [ 
"system-deps", ] +[[package]] +name = "group" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f15be54742789e36f03307c8fdf0621201e1345e94f1387282024178b5e9ec8c" +dependencies = [ + "ff", + "rand 0.7.3", + "rand_xorshift", +] + [[package]] name = "gtk" version = "0.16.2" @@ -2256,6 +2344,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + [[package]] name = "html5ever" version = "0.26.0" @@ -3282,6 +3376,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-integer" version = "0.1.45" @@ -3371,6 +3476,15 @@ dependencies = [ "objc", ] +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + [[package]] name = "once_cell" version = "1.18.0" @@ -3557,11 +3671,13 @@ dependencies = [ "gloo-timers", "hex", "log", + "once_cell", "rand 0.7.3", "serde", "serde_bare", "serde_bytes", "slice_as_array", + "threshold_crypto", "time 0.3.23", "wasm-bindgen", "web-time", @@ -3591,6 +3707,18 @@ dependencies = [ "libm", ] +[[package]] +name = "pairing" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8290dea210a712682cd65031dc2b34fd132cf2729def3df7ee08f0737ff5ed6" +dependencies = [ + "byteorder", + "ff", + "group", + "rand_core 0.5.1", +] + [[package]] name = "pango" version = "0.16.5" @@ -4125,6 +4253,15 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xorshift" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77d416b86801d23dde1aa643023b775c3a462efc0ed96443add11546cdf1dca8" +dependencies = [ + "rand_core 0.5.1", +] + [[package]] name = "raw-window-handle" version = "0.5.2" @@ -4329,6 +4466,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + [[package]] name = "rustc-hash" version = "1.1.0" @@ -4917,6 +5060,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "unicode-xid", +] + [[package]] name = "system-configuration" version = "0.5.1" @@ -5294,6 +5449,26 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threshold_crypto" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f708705bce37e765c37a95a8e0221a327c880d5a5a148d522552e8daa85787a" +dependencies = [ + "byteorder", + "failure", + "ff", + "group", + "hex_fmt", + "log", + "pairing", + "rand 0.7.3", + "rand_chacha 0.2.2", + "serde", + "tiny-keccak", + 
"zeroize", +] + [[package]] name = "tiff" version = "0.8.1" @@ -5343,6 +5518,15 @@ dependencies = [ "time-core", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.6.0" @@ -5662,6 +5846,12 @@ version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + [[package]] name = "unique_id" version = "0.1.5" diff --git a/ng-wallet/src/lib.rs b/ng-wallet/src/lib.rs index af7d5bc..3679788 100644 --- a/ng-wallet/src/lib.rs +++ b/ng-wallet/src/lib.rs @@ -497,9 +497,11 @@ pub async fn connect_wallet( )); continue; } - let broker = broker.unwrap(); + let brokers = broker.unwrap(); let mut tried: Option<(String, String, String, Option, f64)> = None; - for broker_info in broker { + //TODO: on tauri (or forward in local broker, or CLI), prefer a BoxPublic to a Domain. Domain always comes first though, so we need to reorder the list + //TODO: use site.bootstraps to order the list of brokerInfo. + for broker_info in brokers { match broker_info { BrokerInfoV0::ServerV0(server) => { let url = server.get_ws_url(&location).await; @@ -508,7 +510,7 @@ pub async fn connect_wallet( if url.is_some() { let url = url.unwrap(); if url.1.len() == 0 { - // TODO deal with BoxPublic and on tauri all Box... + // TODO deal with Box(Dyn)Public -> tunnel, and on tauri/forward/CLIs, deal with all Box -> direct connections (when url.1.len is > 0) let res = BROKER .write() .await @@ -548,6 +550,7 @@ pub async fn connect_wallet( } } } + // Core information is discarded _ => {} } } diff --git a/ng-wallet/src/types.rs b/ng-wallet/src/types.rs index 321a124..803f9e8 100644 --- a/ng-wallet/src/types.rs +++ b/ng-wallet/src/types.rs @@ -454,15 +454,11 @@ impl WalletLogV0 { if self.is_last_occurrence(op.0, &op.1) != 0 { let _ = wallet.sites.get_mut(&site.to_string()).and_then(|site| { match store_type { - SiteStoreType::Public => { - site.public.root_branch_def_ref = rbdr.clone() - } + SiteStoreType::Public => site.public.read_cap = rbdr.clone(), SiteStoreType::Protected => { - site.protected.root_branch_def_ref = rbdr.clone() - } - SiteStoreType::Private => { - site.private.root_branch_def_ref = rbdr.clone() + site.protected.read_cap = rbdr.clone() } + SiteStoreType::Private => site.private.read_cap = rbdr.clone(), }; None:: }); @@ -472,14 +468,12 @@ impl WalletLogV0 { if self.is_last_occurrence(op.0, &op.1) != 0 { let _ = wallet.sites.get_mut(&site.to_string()).and_then(|site| { match store_type { - SiteStoreType::Public => { - site.public.repo_secret = secret.clone() - } + SiteStoreType::Public => site.public.write_cap = secret.clone(), SiteStoreType::Protected => { - site.protected.repo_secret = secret.clone() + site.protected.write_cap = secret.clone() } SiteStoreType::Private => { - site.private.repo_secret = secret.clone() + site.private.write_cap = secret.clone() } }; None:: @@ -605,7 +599,7 @@ pub enum WalletOperation { AddThirdPartyDataV0((String, Vec)), RemoveThirdPartyDataV0(String), SetSiteRBDRefV0((PubKey, SiteStoreType, ObjectRef)), - SetSiteRepoSecretV0((PubKey, SiteStoreType, SymKey)), + 
SetSiteRepoSecretV0((PubKey, SiteStoreType, RepoWriteCapSecret)), } use std::collections::hash_map::DefaultHasher; diff --git a/ngd/src/cli.rs b/ngd/src/cli.rs index 62c0c39..f6217bf 100644 --- a/ngd/src/cli.rs +++ b/ngd/src/cli.rs @@ -71,7 +71,7 @@ pub(crate) struct Cli { )] pub public: Option, - /// When --public is used, this option will disallow clients to connect to the public interface too. Otherwise, by default, they can. Should be used in combination with a --domain option + /// When --public or --dynamic is used, this option will disallow clients to connect to the public interface too. Otherwise, by default, they can. Should be used in combination with a --domain option #[arg(long, conflicts_with("private"))] pub public_without_clients: bool, diff --git a/ngd/src/main.rs b/ngd/src/main.rs index 1bdf182..ba043aa 100644 --- a/ngd/src/main.rs +++ b/ngd/src/main.rs @@ -573,7 +573,7 @@ async fn main_inner() -> Result<(), ()> { { if args.domain_peer.is_some() { log_err!( - "--local is not allowed if --domain-peer is selected, and they both use the same port. change the port of one of them. cannot start" + "--local is not allowed if --domain-peer is selected, as they both use the same port. change the port of one of them. cannot start" ); return Err(()); } @@ -591,8 +591,8 @@ async fn main_inner() -> Result<(), ()> { } } - //// --core - + // --core + // core listeners always come after the domain ones, which is good as the first bootstrap in the list should be the domain (if there is also a core_with_clients that generates a BoxPublic bootstrap) if args.core.is_some() { let arg_value = parse_interface_and_port_for(args.core.as_ref().unwrap(), "--core", DEFAULT_PORT)?; diff --git a/p2p-net/src/actors/start.rs b/p2p-net/src/actors/start.rs index f39835b..594cce7 100644 --- a/p2p-net/src/actors/start.rs +++ b/p2p-net/src/actors/start.rs @@ -13,7 +13,7 @@ use crate::actors::noise::Noise; use crate::connection::NoiseFSM; use crate::types::{ AdminRequest, CoreBrokerConnect, CoreBrokerConnectResponse, CoreBrokerConnectResponseV0, - CoreMessage, CoreMessageV0, CoreResponseContentV0, CoreResponseV0, ExtResponse, + CoreMessage, CoreMessageV0, CoreResponse, CoreResponseContentV0, CoreResponseV0, ExtResponse, }; use crate::{actor::*, errors::ProtocolError, types::ProtocolMessage}; use async_std::sync::Mutex; @@ -79,13 +79,13 @@ impl TryFrom for CoreBrokerConnectResponse { type Error = ProtocolError; fn try_from(msg: ProtocolMessage) -> Result { if let ProtocolMessage::CoreMessage(CoreMessage::V0(CoreMessageV0::Response( - CoreResponseV0 { + CoreResponse::V0(CoreResponseV0 { content: CoreResponseContentV0::BrokerConnectResponse(a), .. - }, + }), ))) = msg { - Ok(CoreBrokerConnectResponse::V0(a)) + Ok(a) } else { log_debug!("INVALID {:?}", msg); Err(ProtocolError::InvalidValue) diff --git a/p2p-net/src/types.rs b/p2p-net/src/types.rs index 909bcb3..c1c64c4 100644 --- a/p2p-net/src/types.rs +++ b/p2p-net/src/types.rs @@ -137,7 +137,7 @@ pub enum BrokerServerTypeV0 { BoxPrivate(Vec), BoxPublic(Vec), BoxPublicDyn(Vec), // can be empty - Domain(String), // accepts an option trailing ":port" number + Domain(String), // accepts an optional trailing ":port" number //Core(Vec), } @@ -835,7 +835,7 @@ pub enum AcceptForwardForV0 { PublicDomain((String, String)), /// X-Forwarded-For accepted only for clients with public addresses. 
First param is the domain of the proxy server - /// domain can take an option port (trailing `:port`) + /// domain can take an optional port (trailing `:port`) /// second param is the privKey of the PeerId of the proxy server, useful when the proxy server is load balancing to several daemons /// that should all use the same PeerId to answer requests PublicDomainPeer((String, PrivKey, String)), @@ -960,7 +960,7 @@ pub struct ListenerV0 { /// when the box is behind a DMZ, and ipv6 is enabled, the private interface will get the external public IpV6. with this option we allow binding to it pub bind_public_ipv6: bool, - /// default to false. Set to true by --core (use --core-and-clients to override to false). only useful for a public IP listener, if the clients should use another listener like --domain or --domain-private. + /// default to false. Set to true by --core (use --core-with-clients to override to false). only useful for a public IP listener, if the clients should use another listener like --domain or --domain-private. /// do not set it on a --domain or --domain-private, as this will enable the relay_websocket feature, which should not be used except by app.nextgraph.one pub refuse_clients: bool, @@ -1166,6 +1166,9 @@ pub type ForwardedPeerId = PubKey; pub enum PeerId { Direct(DirectPeerId), Forwarded(ForwardedPeerId), + /// BLAKE3 keyed hash over ForwardedPeerId + /// - key: BLAKE3 derive_key ("NextGraph ForwardedPeerId Hash Overlay Id BLAKE3 key", overlayId) + ForwardedObfuscated(Digest), } pub type OuterOverlayId = Digest; pub type InnerOverlayId = Digest; /// Overlay ID /// /// - for outer overlays that need to be discovered by public key: -/// BLAKE3 hash over the repository public key (of root repo) +/// BLAKE3 hash over the public key of the store repo /// - for inner overlays: -/// BLAKE3 keyed hash over the repository public key (of root repo) -/// - key: BLAKE3 derive_key ("NextGraph Overlay Secret BLAKE3 key", root_secret) +/// BLAKE3 keyed hash over the public key of the store repo +/// - key: BLAKE3 derive_key ("NextGraph Overlay ReadCapSecret BLAKE3 key", store repo's overlay's branch ReadCapSecret) #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub enum OverlayId { Outer(OuterOverlayId), @@ -1246,15 +1249,14 @@ impl OverlayAccess { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct InnerOverlayLink { /// overlay public key ID - pub id: Identity, + pub id: StoreOverlay, - /// current root branch definition commit - /// The ref is split in two: id and key. - /// The ID can be omitted if reading the overlay members should not be allowed. - /// In this case, the pinning broker will not be able to subscribe to the overlay root topic - /// and will therefor lose access if the overlay is refreshed. - pub root_branch_def_id: Option, - pub root_branch_def_key: ObjectKey, + /// The store has a special branch called `overlay` that is used to manage access to the InnerOverlay + /// only the ReadCapSecret is needed to access the InnerOverlay + /// the full readcap of this branch is needed in order to subscribe to the topic and decrypt the events. The branchId can be found in the branch Definition + /// it can be useful to subscribe to this topic if the user is at least a reader of the store's repo, so it will be notified of refreshReadCap on the overlay + /// if the user is an external user to the store, it will lose access to the InnerOverlay after a RefreshReadCap of the overlay branch of the store. + pub store_overlay_readcap: ReadCap, }
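As an illustration of the two OverlayId derivations documented above, a minimal sketch using the `blake3` crate (the function names and raw 32-byte parameters are assumptions for the sketch, not code from this patch):

```rust
/// Hypothetical sketch of the OverlayId derivations described above.
fn outer_overlay_id(store_repo_pubkey: &[u8; 32]) -> [u8; 32] {
    // outer overlay: plain BLAKE3 hash over the store repo's public key
    *blake3::hash(store_repo_pubkey).as_bytes()
}

fn inner_overlay_id(store_repo_pubkey: &[u8; 32], overlay_branch_readcap_secret: &[u8]) -> [u8; 32] {
    // inner overlay: keyed BLAKE3 hash, the key being derived from the
    // overlay branch's ReadCapSecret with the context string quoted above
    let key = blake3::derive_key(
        "NextGraph Overlay ReadCapSecret BLAKE3 key",
        overlay_branch_readcap_secret,
    );
    *blake3::keyed_hash(&key, store_repo_pubkey).as_bytes()
}
```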
/// Overlay Link /// /// Details of the overlay of an NgLink #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub enum OverlayLink { - Outer(Identity), + Outer(StoreOverlay), Inner(InnerOverlayLink), Inherit, } @@ -1434,7 +1436,7 @@ impl ClientInfo { /// Overlay leave request /// -/// In outerOverlay: informs the broker that the overlay is not need anymore +/// In outerOverlay: informs the broker that the overlay is not needed anymore /// In innerOverlay: Sent to all connected overlay participants to terminate a session #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub enum OverlayLeave { @@ -1520,8 +1522,8 @@ pub enum SubMarker { /// Topic unsubscription request by a subscriber /// -/// A broker unsubscribes from upstream brokers -/// when it has no more subscribers left +/// A broker unsubscribes from all publisher brokers in the overlay +/// when it has no more local subscribers left #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub struct UnsubReqV0 { /// Topic public key @@ -1535,30 +1537,49 @@ pub enum UnsubReq { } /// Content of EventV0 -/// Contains the object of newly published Commit, its optional blocks, and optional refs and their blocks. +/// Contains the objects of a newly published Commit, its optional blocks, and optional refs and their blocks. /// If a block is not present in the Event, its ID should be present in block_ids and the block should be put on the emitting broker beforehand with BlocksPut. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct EventContentV0 { /// Pub/sub topic pub topic: TopicId, - pub publisher: ForwardedPeerId, + // TODO: could be obfuscated (or not, if we want to be able to recall events) + // on public repos, should be obfuscated + pub publisher: PeerId, /// Commit sequence number of publisher pub seq: u64, - /// Blocks with encrypted content. First in the list is always the commit block, the others are optional. + /// Blocks with encrypted content. First in the list is always the commit block followed by its children, then its optional header and body blocks (and their children, if any), + /// blocks of the REFS are optional (only sent here if the user specifically wants to push them to the pub/sub). + /// the first in the list MUST contain a commit_header_key + /// When saved locally (the broker keeps the associated event, until the topic is refreshed (the last heads retain their events)), + /// this `blocks` list is emptied (as the blocks are saved in the overlay storage anyway) and their IDs are kept on the side.
+ /// then when the event needs to be sent in reply to a *TopicSyncReq, the blocks list is regenerated from the IDs, + /// so that a valid EventContent can be sent (and so that its signature can be verified successfully) pub blocks: Vec, - /// Ids of additional Blocks with encrypted content that are not to be pushed in the pub/sub + /// Ids of additional Blocks (REFS) with encrypted content that are not to be pushed in the pub/sub + /// they will be retrieved later by interested users pub block_ids: Vec, - /// Encrypted key for the Commit object (the first Block in blocks) - /// The key is encrypted using ChaCha20: - /// - key: BLAKE3 derive_key ("NextGraph Event ObjectRef ChaCha20 key", - /// repo_pubkey + branch_pubkey + branch_secret + publisher) - /// - nonce: commit_seq - pub key: Option, + /// can be: + /// * Encrypted key for the Commit object (the first Block in blocks vec) + /// The ObjectKey is encrypted using ChaCha20: + /// - key: BLAKE3 derive_key ("NextGraph Event Commit ObjectKey ChaCha20 key", + /// RepoId + BranchId + branch_secret(ReadCapSecret of the branch) + publisher) + /// - nonce: commit_seq + /// * If it is a CertificateRefresh, both the blocks and block_ids vectors are empty. + /// the key here contains an encrypted ObjectRef to the new Certificate. + /// The whole ObjectRef is encrypted (including the ID) to avoid correlation of topics that will have the same Certificate ID (belonging to the same repo) + /// Encrypted using ChaCha20, with: + /// - key: BLAKE3 derive_key ("NextGraph Event Certificate ObjectRef ChaCha20 key", + /// RepoId + BranchId + branch_secret(ReadCapSecret of the branch) + publisher) + /// it is safe to reuse the same key as above, because the commit_seq (used as nonce) will be different (incremented anyway) + /// - nonce: commit_seq + #[serde(with = "serde_bytes")] + pub key: Vec, }
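A minimal sketch of the `key` encryption described above, with the `blake3` and `chacha20` crates (the field names, the concatenation order of the key material, and the placement of commit_seq inside the 12-byte nonce are assumptions, not taken from this patch):

```rust
use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::ChaCha20;

/// Hypothetical sketch: cipher the serialized commit ObjectKey in place
/// (ChaCha20 is symmetric, so the same call encrypts and decrypts).
fn cipher_event_key(
    object_key_bytes: &mut [u8],
    repo_id: &[u8; 32],
    branch_id: &[u8; 32],
    branch_read_cap_secret: &[u8; 32],
    publisher: &[u8; 32],
    commit_seq: u64,
) {
    // key: BLAKE3 derive_key over RepoId + BranchId + branch ReadCapSecret + publisher
    let mut key_material = Vec::with_capacity(128);
    key_material.extend_from_slice(repo_id);
    key_material.extend_from_slice(branch_id);
    key_material.extend_from_slice(branch_read_cap_secret);
    key_material.extend_from_slice(publisher);
    let key = blake3::derive_key("NextGraph Event Commit ObjectKey ChaCha20 key", &key_material);
    // nonce: commit_seq (here assumed to fill the low 8 bytes of the nonce)
    let mut nonce = [0u8; 12];
    nonce[4..].copy_from_slice(&commit_seq.to_be_bytes());
    let mut cipher = ChaCha20::new(&key.into(), &nonce.into());
    cipher.apply_keystream(object_key_bytes);
}
```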
/// Pub/sub event published in a topic @@ -1584,7 +1605,7 @@ pub enum Event { /// from a subscriber to one publisher at a time. /// fanout is always 1 /// if result is none, tries another path if several paths available locally -/// answered with a BlockResult +/// answered with a stream of BlockResult #[derive(Clone, Debug, Serialize, Deserialize)] pub struct BlockSearchTopicV0 { /// Topic to forward the request in @@ -1612,7 +1633,7 @@ pub enum BlockSearchTopic { /// Block search along a random walk in the overlay /// fanout is always 1 /// if result is none, tries another path if several paths available locally -/// answered with a BlockResult +/// answered with a stream of BlockResult #[derive(Clone, Debug, Serialize, Deserialize)] pub struct BlockSearchRandomV0 { /// List of Block IDs to request @@ -1636,7 +1657,7 @@ pub enum BlockSearchRandom { } /// Response to a BlockSearch* request -/// +/// can be a stream #[derive(Clone, Debug, Serialize, Deserialize)] pub struct BlockResultV0 { /// Resulting Blocks(s) @@ -1644,6 +1665,7 @@ } /// Response to a BlockSearch* request +/// can be a stream #[derive(Clone, Debug, Serialize, Deserialize)] pub enum BlockResult { V0(BlockResultV0), } /// Topic synchronization request /// -/// In response a stream of `Block`s of the requested Objects are sent -/// that are not present in the requestor's known heads and commits +/// In response a stream of `TopicSyncRes`s containing the missing Commits or events is sent #[derive(Clone, Debug, Serialize, Deserialize)] pub struct TopicSyncReqV0 { /// Topic public key @@ -1661,9 +1682,9 @@ /// Fully synchronized until these commits pub known_heads: Vec, - /// Known commit IDs since known_heads - // TODO: is this going to be used? - pub known_commits: BloomFilter, + /// Stop synchronizing when these commits are met. + /// if empty, the local HEAD at the responder is used instead + pub target_heads: Vec, } /// Topic synchronization request @@ -1683,11 +1704,6 @@ impl TopicSyncReq { TopicSyncReq::V0(o) => &o.known_heads, } } - pub fn known_commits(&self) -> &BloomFilter { - match self { - TopicSyncReq::V0(o) => &o.known_commits, - } - } } /// Status of a Forwarded Peer, sent in the Advert @@ -1702,11 +1718,12 @@ pub enum PeerStatus { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct ForwardedPeerAdvertV0 { /// PeerAdvert received from Client + // TODO: this could be obfuscated when the user doesn't want to recall events.
pub peer_advert: PeerAdvertV0, /// Hashed user Id, used to prevent concurrent connection from different brokers /// BLAKE3 keyed hash over the UserId - /// - key: BLAKE3 derive_key ("NextGraph Overlay Id BLAKE3 key", overlayId) + /// - key: BLAKE3 derive_key ("NextGraph UserId Hash Overlay Id ForwardedPeerAdvertV0 BLAKE3 key", overlayId) // will always be an Inner overlay pub user_hash: Digest, /// whether the Advert is about connection or disconnection @@ -1915,12 +1932,27 @@ pub struct CoreOverlayJoinedAdvertV0 { pub overlay: OverlayAdvertV0, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreBrokerJoinedAdvert { + V0(CoreBrokerJoinedAdvertV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreBrokerLeftAdvert { + V0(CoreBrokerLeftAdvertV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreOverlayJoinedAdvert { + V0(CoreOverlayJoinedAdvertV0), +} + /// Content of CoreAdvert V0 #[derive(Clone, Debug, Serialize, Deserialize)] pub enum CoreAdvertContentV0 { - BrokerJoined(CoreBrokerJoinedAdvertV0), - BrokerLeft(CoreBrokerLeftAdvertV0), - OverlayJoined(CoreOverlayJoinedAdvertV0), + BrokerJoined(CoreBrokerJoinedAdvert), + BrokerLeft(CoreBrokerLeftAdvert), + OverlayJoined(CoreOverlayJoinedAdvert), } /// CoreAdvert V0 @@ -1955,7 +1987,7 @@ pub struct OverlayAdvertMarkerV0 { /// path from the new broker who started a session, to the broker that is sending the marker pub path: Vec, - /// randomly generated nonce used for the reply (a ReturnPathTimingMarker) that will be sent after receiving the marker + /// randomly generated nonce used for the reply (a ReturnPathTimingMarker) that will be sent back after this marker has been received on the other end pub reply_nonce: u64, } @@ -1963,17 +1995,19 @@ pub struct OverlayAdvertMarkerV0 { #[derive(Clone, Debug, Serialize, Deserialize)] pub struct CoreBlockGetV0 { /// Block ID to request - pub id: BlockId, + pub ids: Vec, /// Whether or not to include all children recursively pub include_children: bool, - /// randomly generated number by requester, used for sending reply. Purpose is to defeat replay attacks in the overlay - /// the requester keeps track of req_nonce and destination peerid. + /// randomly generated number by requester, used for sending reply. + /// the requester keeps track of req_nonce and requested peerid. 
+ /// used for handling the stream pub req_nonce: u64, } /// Core Block Result V0 +/// can be a stream #[derive(Clone, Debug, Serialize, Deserialize)] pub struct CoreBlockResultV0 { /// Resulting Object(s) @@ -1993,13 +2027,33 @@ pub struct ReturnPathTimingAdvertV0 { pub nonce: u64, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum OverlayAdvertMarker { + V0(OverlayAdvertMarkerV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum ReturnPathTimingAdvert { + V0(ReturnPathTimingAdvertV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreBlockGet { + V0(CoreBlockGetV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreBlockResult { + V0(CoreBlockResultV0), +} + /// Content of CoreDirectMessage V0 #[derive(Clone, Debug, Serialize, Deserialize)] pub enum CoreDirectMessageContentV0 { - OverlayAdvertMarker(OverlayAdvertMarkerV0), - ReturnPathTimingAdvert(ReturnPathTimingAdvertV0), - BlockGet(CoreBlockGetV0), - BlockResult(CoreBlockResultV0), + OverlayAdvertMarker(OverlayAdvertMarker), + ReturnPathTimingAdvert(ReturnPathTimingAdvert), + BlockGet(CoreBlockGet), + BlockResult(CoreBlockResult), //PostInbox, //PartialSignature, //ClientDirectMessage //for messages between forwarded or direct peers @@ -2050,11 +2104,13 @@ pub enum CoreBrokerConnectResponse { impl CoreBrokerConnect { pub fn core_message(&self, id: i64) -> CoreMessage { match self { - CoreBrokerConnect::V0(v0) => CoreMessage::V0(CoreMessageV0::Request(CoreRequestV0 { - padding: vec![], - id, - content: CoreRequestContentV0::BrokerConnect(v0.clone()), - })), + CoreBrokerConnect::V0(v0) => { + CoreMessage::V0(CoreMessageV0::Request(CoreRequest::V0(CoreRequestV0 { + padding: vec![], + id, + content: CoreRequestContentV0::BrokerConnect(CoreBrokerConnect::V0(v0.clone())), + }))) + } } } } pub type CoreBrokerDisconnectV0 = (); /// // replied with an emptyResponse, and an error code if OverlayId not present on remote broker #[derive(Clone, Debug, Serialize, Deserialize)] pub enum CoreOverlayJoinV0 { - Inner(OverlayAdvertV0), + Inner(OverlayAdvert), Outer(Digest), } /// Content of OuterOverlayResponse V0 #[derive(Clone, Debug, Serialize, Deserialize)] pub enum OuterOverlayResponseContentV0 { EmptyResponse(()), Block(Block), + TopicSyncRes(TopicSyncRes), //PostInboxResponse(PostInboxResponse), } @@ -2107,13 +2164,14 @@ pub struct OuterOverlayResponseV0 { /// Core Topic synchronization request /// -/// behaves like BlockSearchTopic (primarily searches among the publishers) +/// behaves like BlockSearchTopic (primarily searches among the publishers, except if search_in_subs is set to true) /// fanout is 1 for now -/// In response a stream of `Block`s of the requested Objects are sent -/// that are not present in the requestor's known heads and commits /// -/// if some target_heads are not found locally, then all successors of known_heads are sent anyway. -/// Then this temporary HEAD is used to propagate the CoreTopicSyncReq to upstream brokers +/// If some target_heads are not found locally, all successors of known_heads are sent anyway, +/// and then this temporary HEAD is used to propagate/fanout the CoreTopicSyncReq to upstream brokers +/// +/// Answered with one or many TopicSyncRes: a stream of `Block`s or `Event`s of the commits +/// If the responder has an Event for the commit(s) in its HEAD, it will send the event instead of the plain commit's blocks.
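To make the head-bounded behavior above concrete, here is a hypothetical responder-side selection sketch (the `ObjectId` alias and the `children_of` index are assumptions for illustration, not items from this patch):

```rust
use std::collections::{HashSet, VecDeque};

type ObjectId = [u8; 32]; // stand-in for the repo's object ID digest type

/// Walk forward from `known_heads`, collecting the commits to send, and stop
/// at `target_heads`; an empty `target_heads` means "up to the local HEAD".
fn commits_to_send(
    known_heads: &[ObjectId],
    target_heads: &[ObjectId],
    children_of: impl Fn(&ObjectId) -> Vec<ObjectId>,
) -> Vec<ObjectId> {
    let stop: HashSet<ObjectId> = target_heads.iter().copied().collect();
    let mut visited: HashSet<ObjectId> = known_heads.iter().copied().collect();
    let mut queue: VecDeque<ObjectId> = known_heads.iter().copied().collect();
    let mut to_send = Vec::new();
    while let Some(id) = queue.pop_front() {
        for child in children_of(&id) {
            if visited.insert(child) {
                to_send.push(child);
                if !stop.contains(&child) {
                    queue.push_back(child); // keep walking until a target head is met
                }
            }
        }
    }
    to_send
}
```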
#[derive(Clone, Debug, Serialize, Deserialize)] pub struct CoreTopicSyncReqV0 { /// Topic public key @@ -2125,12 +2183,9 @@ pub struct CoreTopicSyncReqV0 { /// Fully synchronized until these commits pub known_heads: Vec, - /// Stop synchronizing when these commits are met + /// Stop synchronizing when these commits are met. + /// if empty, the local HEAD at the responder is used instead pub target_heads: Vec, - - /// Known commit IDs since known_heads - // TODO: is this going to be used? - pub known_commits: BloomFilter, } /// Topic synchronization request @@ -2139,16 +2194,45 @@ pub enum CoreTopicSyncReq { V0(CoreTopicSyncReqV0), } +/// Topic synchronization response V0 +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum TopicSyncResV0 { + Event(Event), + Block(Block), +} + +/// Topic synchronization response +/// it is a stream of blocks and or events. +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum TopicSyncRes { + V0(TopicSyncResV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreBrokerDisconnect { + V0(CoreBrokerDisconnectV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreOverlayJoin { + V0(CoreOverlayJoinV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum OuterOverlayRequest { + V0(OuterOverlayRequestV0), +} + /// Content of CoreRequest V0 #[derive(Clone, Debug, Serialize, Deserialize)] pub enum CoreRequestContentV0 { - BrokerConnect(CoreBrokerConnectV0), - BrokerDisconnect(CoreBrokerDisconnectV0), - OverlayJoin(CoreOverlayJoinV0), - BlockSearchTopic(BlockSearchTopicV0), - BlockSearchRandom(BlockSearchRandomV0), - TopicSyncReq(CoreTopicSyncReqV0), - OuterOverlayRequest(OuterOverlayRequestV0), + BrokerConnect(CoreBrokerConnect), + BrokerDisconnect(CoreBrokerDisconnect), + OverlayJoin(CoreOverlayJoin), + BlockSearchTopic(BlockSearchTopic), + BlockSearchRandom(BlockSearchRandom), + TopicSyncReq(CoreTopicSyncReq), + OuterOverlayRequest(OuterOverlayRequest), } /// CoreRequest V0 @@ -2178,12 +2262,18 @@ pub struct CoreBrokerConnectResponseV0 { pub errors: Vec, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum OuterOverlayResponse { + V0(OuterOverlayResponseV0), +} + /// Content CoreResponse V0 #[derive(Clone, Debug, Serialize, Deserialize)] pub enum CoreResponseContentV0 { - BrokerConnectResponse(CoreBrokerConnectResponseV0), - BlockResult(BlockResultV0), - OuterOverlayResponse(OuterOverlayResponseV0), + BrokerConnectResponse(CoreBrokerConnectResponse), + BlockResult(BlockResult), + TopicSyncRes(TopicSyncRes), + OuterOverlayResponse(OuterOverlayResponse), EmptyResponse(()), } @@ -2226,15 +2316,30 @@ pub struct OuterOverlayMessageV0 { pub padding: Vec, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreAdvert { + V0(CoreAdvertV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum CoreDirectMessage { + V0(CoreDirectMessageV0), +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum OuterOverlayMessage { + V0(OuterOverlayMessageV0), +} + /// CoreMessageV0 #[derive(Clone, Debug, Serialize, Deserialize)] pub enum CoreMessageV0 { - Request(CoreRequestV0), - Response(CoreResponseV0), - Advert(CoreAdvertV0), - Direct(CoreDirectMessageV0), - InnerOverlay(InnerOverlayMessageV0), - OuterOverlay(OuterOverlayMessageV0), + Request(CoreRequest), + Response(CoreResponse), + Advert(CoreAdvert), + Direct(CoreDirectMessage), + InnerOverlay(InnerOverlayMessage), + OuterOverlay(OuterOverlayMessage), } /// Core message @@ -2475,6 +2580,8 @@ pub struct OpenRepoV0 { pub overlay: 
OverlayAccess, /// Broker peers to connect to in order to join the overlay + /// can be empty for private store (the broker will not connect to any other broker) + /// but if the private repo is pinned in other brokers, those brokers should be entered here for syncing. pub peers: Vec, /// Maximum number of peers to connect to for this overlay (only valid for an inner (RW/WO) overlay) @@ -2502,15 +2609,6 @@ impl OpenRepo { } } -/// Block pinning strategy. When Pinning a repo, user can choose to Pin locally on the broker: -/// all their published commits (if they are publisher) or all the commits of all the users. -#[derive(Clone, Debug, Serialize, Deserialize)] -pub enum BlockPinningStrategy { - MyCommits, - AllCommits, - None, -} - /// Request to pin a repo on the broker. /// When client will disconnect, the subscriptions and publisherAdvert of the topics will be remain active on the broker, #[derive(Clone, Debug, Serialize, Deserialize)] pub struct PinRepoV0 { @@ -2542,11 +2640,8 @@ pub struct PinRepoV0 { /// only possible with inner (RW or WO) overlays. /// If the repo has previously been opened (during the same session) then rw_topics info can be omitted pub rw_topics: Vec, - - /// Pin incoming commits' blocks (for subscribed topics) - pub pin_all_events: bool, // TODO pub inbox_proof - - // TODO pub signer_proof + // TODO pub inbox_proof + // TODO pub signer_proof } /// Request to pin a repo #[derive(Clone, Debug, Serialize, Deserialize)] pub enum PinRepo { @@ -2563,6 +2658,35 @@ impl PinRepo { } } +/// Request to refresh the Pinning of a previously pinned repo. +/// it can consist of updating the expose_outer, the list of ro_topics and/or rw_topics, +/// and in case of a ban_member, the broker will effectively flush the topics locally after all local members, except the banned one, have refreshed +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct RefreshPinRepoV0 { + /// The new PinRepo info + pub pin: PinRepo, + + /// optional hashed member ID that should be banned + pub ban_member: Option, + + /// when banning, list of topics that are to be flushed (once all the local members have left, except the one to be banned) + /// All the honest local members have to send this list in order for the banned one to be effectively banned + /// for each Topic, a signature over the hashed UserId to ban, by the Topic private key. + /// The banning process on the broker is meant to flush topics that would remain dangling if the malicious member would not unpin them after being removed from the members of the repo. + /// The userId of the banned user is revealed to the local broker where it was attached, which is a breach of privacy deemed acceptable + /// as only a broker that already knew the userid will enforce it, and + /// that broker might be interested to know that the offending user was banned from a repo, as only malicious users are banned. + /// The broker might also discard this information, and just proceed with the flush without much ado. + /// Of course, if the broker is controlled by the malicious user, it might not proceed with the ban/flush. But who cares. That broker will keep old data forever, but it is a malicious broker anyway. + pub flush_topics: Vec<(TopicId, Sig)>, +} + +/// Request to refresh the pinning of a repo +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum RefreshPinRepo { + V0(RefreshPinRepoV0), +} + /// Request to unpin a repo on the broker. /// When client will disconnect, the subscriptions and publisherAdvert of the topics will be removed on the broker /// (for that user only.
other users might continue to have the repo pinned) @@ -2680,16 +2804,18 @@ pub enum TopicUnsub { } /// Request a Block by ID +/// commit_header_key is always set to None in the reply when request is made on OuterOverlay of protected or Group overlays #[derive(Clone, Debug, Serialize, Deserialize)] pub struct BlockGetV0 { - /// Block ID to request - pub id: BlockId, + /// Block IDs to request + pub ids: Vec, /// Whether or not to include all children recursively pub include_children: bool, - /// Topic the object is referenced from - pub topic: Option, + /// Topic the object is referenced from, if it is known by the requester. + /// can be used to do a BlockSearchTopic in the core overlay. + pub topic: Option, } /// Request an object by ID @@ -2699,9 +2825,9 @@ pub enum BlockGet { } impl BlockGet { - pub fn id(&self) -> BlockId { + pub fn ids(&self) -> &Vec { match self { - BlockGet::V0(o) => o.id, + BlockGet::V0(o) => &o.ids, } } pub fn include_children(&self) -> bool { @@ -2738,6 +2864,7 @@ impl BlocksPut { } /// Request to know if some blocks are present locally +/// used by client before publishing an event, to know what to push #[derive(Clone, Debug, Serialize, Deserialize)] pub struct BlocksExistV0 { /// Ids of Blocks to check @@ -2826,27 +2953,27 @@ impl ObjectDel { /// Content of `ClientRequestV0` #[derive(Clone, Debug, Serialize, Deserialize)] pub enum ClientRequestContentV0 { - OpenRepo(OpenRepoV0), - PinRepo(PinRepoV0), - UnpinRepo(UnpinRepoV0), - RepoPinStatusReq(RepoPinStatusReqV0), + OpenRepo(OpenRepo), + PinRepo(PinRepo), + UnpinRepo(UnpinRepo), + RepoPinStatusReq(RepoPinStatusReq), // once repo is opened or pinned: - TopicSub(TopicSubV0), - TopicUnsub(TopicUnsubV0), + TopicSub(TopicSub), + TopicUnsub(TopicUnsub), - BlocksExist(BlocksExistV0), - BlockGet(BlockGetV0), - TopicSyncReq(TopicSyncReqV0), + BlocksExist(BlocksExist), + BlockGet(BlockGet), + TopicSyncReq(TopicSyncReq), // For Pinned Repos only : - ObjectPin(ObjectPinV0), - ObjectUnpin(ObjectUnpinV0), - ObjectDel(ObjectDelV0), + ObjectPin(ObjectPin), + ObjectUnpin(ObjectUnpin), + ObjectDel(ObjectDel), // For InnerOverlay's only : - BlocksPut(BlocksPutV0), - PublishEvent(EventV0), + BlocksPut(BlocksPut), + PublishEvent(Event), } /// Broker overlay request #[derive(Clone, Debug, Serialize, Deserialize)] @@ -2918,8 +3045,9 @@ impl BlocksFound { pub enum ClientResponseContentV0 { EmptyResponse, Block(Block), - BlocksFound(BlocksFoundV0), - RepoPinStatus(RepoPinStatusV0), + TopicSyncRes(TopicSyncRes), + BlocksFound(BlocksFound), + RepoPinStatus(RepoPinStatus), } /// Response to a `ClientRequest` @@ -2975,6 +3103,7 @@ pub enum ClientMessageContentV0 { ClientRequest(ClientRequest), ClientResponse(ClientResponse), ForwardedEvent(Event), + ForwardedBlock(Block), } /// Broker message for an overlay #[derive(Clone, Debug, Serialize, Deserialize)] @@ -3030,7 +3159,8 @@ impl ClientMessage { ClientMessage::V0(o) => match &o.content { ClientMessageContentV0::ClientResponse(r) => r.id(), ClientMessageContentV0::ClientRequest(r) => r.id(), - ClientMessageContentV0::ForwardedEvent(_) => { + ClientMessageContentV0::ForwardedEvent(_) + | ClientMessageContentV0::ForwardedBlock(_) => { panic!("it is an event") } }, @@ -3041,7 +3171,8 @@ impl ClientMessage { ClientMessage::V0(o) => match &mut o.content { ClientMessageContentV0::ClientResponse(ref mut r) => r.set_id(id), ClientMessageContentV0::ClientRequest(ref mut r) => r.set_id(id), - ClientMessageContentV0::ForwardedEvent(_) => { + ClientMessageContentV0::ForwardedEvent(_) + | 
ClientMessageContentV0::ForwardedBlock(_) => { panic!("it is an event") } }, @@ -3051,10 +3182,9 @@ impl ClientMessage { match self { ClientMessage::V0(o) => match &o.content { ClientMessageContentV0::ClientResponse(r) => r.result(), - ClientMessageContentV0::ClientRequest(r) => { - panic!("it is not a response"); - } - ClientMessageContentV0::ForwardedEvent(_) => { + ClientMessageContentV0::ClientRequest(_) + | ClientMessageContentV0::ForwardedEvent(_) + | ClientMessageContentV0::ForwardedBlock(_) => { panic!("it is not a response"); } }, @@ -3064,10 +3194,9 @@ impl ClientMessage { match self { ClientMessage::V0(o) => match &o.content { ClientMessageContentV0::ClientResponse(r) => r.block(), - ClientMessageContentV0::ClientRequest(r) => { - panic!("it is not a response"); - } - ClientMessageContentV0::ForwardedEvent(_) => { + ClientMessageContentV0::ClientRequest(_) + | ClientMessageContentV0::ForwardedEvent(_) + | ClientMessageContentV0::ForwardedBlock(_) => { panic!("it is not a response"); } }, @@ -3463,42 +3592,51 @@ impl From for ProtocolMessage { } // -// DIRECT / OUT-OF-BAND MESSAGES +// LINKS // -/// Link/invitation to the repository +/// Link to a repository +/// Consists of an identifier (repoid), a ReadCap or WriteCap, and a locator (peers and overlayLink) +/// Those capabilities are not durable: They can be refreshed by the members and previously shared Caps will become obsolete/revoked. +/// As long as the user is a member of the repo and subscribes to the root topic (of the repo, and of the store if needed/applicable), they will receive the updated capabilities. +/// But if they don't subscribe, they will lose access after the refresh. +/// For durable read capabilities of non-members, see PermaReadCap. +/// In most cases, the link is shared and the recipient opens it and subscribes soon afterward. +/// Perma capabilities are needed only when the link is stored on disk and kept there unopened for a long period. #[derive(Clone, Debug, Serialize, Deserialize)] pub struct RepoLinkV0 { - /// Repository public key ID - pub id: Identity, + /// Repository ID + pub id: RepoId, - pub overlay: OverlayLink, + /// read capability for the whole repo + /// current (at the time of sharing the link) root branch definition commit + pub read_cap: ReadCap, - /// Repository secret. Only set for editors - pub repo_secret: Option, + /// Write capability secret. Only set for editors. in this case, overlay MUST be set to an InnerOverlay + pub write_cap_secret: Option, - /// current root branch definition commit - pub root_branch_def_ref: ObjectRef, + /// Current overlay link, used to join the overlay + pub overlay: OverlayLink, /// Peer brokers to connect to pub peers: Vec, } -/// Link/invitation to the repository +/// Link to a repository #[derive(Clone, Debug, Serialize, Deserialize)] pub enum RepoLink { V0(RepoLinkV0), } impl RepoLink { - pub fn id(&self) -> &Identity { + pub fn id(&self) -> &RepoId { match self { RepoLink::V0(o) => &o.id, } } - pub fn secret(&self) -> &Option { + pub fn write_cap_secret(&self) -> &Option { match self { - RepoLink::V0(o) => &o.repo_secret, + RepoLink::V0(o) => &o.write_cap_secret, } } pub fn peers(&self) -> &Vec { @@ -3508,43 +3646,122 @@ impl RepoLink { } } -/// Link to object(s) or to a branch from a repository -/// that can be shared to non-members +/// The latest ReadCap of the branch (or main branch) will be downloaded from the outerOverlay, if the peer brokers listed below allow it. 
+/// The snapshot can be downloaded instead +/// This locator is durable, because the public sites are served differently by brokers. #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ObjectLinkV0 { - /// Request to send to an overlay peer - pub req: ExtRequest, +pub struct PublicRepoLocatorV0 { + /// Repository ID + pub repo: RepoId, + + /// optional branchId to access. a specific public branch, + /// if not set, the main branch of the repo will be used. + pub branch: Option, + + /// optional commits of head to access. + /// if not set, the current heads of the branch will be used. + pub heads: Vec, + + /// optional snapshot to download, in order to display the content quicker to the end-user. + pub snapshot: Option, + + /// The public site store + pub public_store: PubKey, + + /// Peer brokers to connect to + pub peers: Vec, +} - /// Keys for the root blocks of the requested objects - pub keys: Vec, +/// Link to a public repository +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum PublicRepoLocator { + V0(PublicRepoLocatorV0), } -/// Link to object(s) or to a branch from a repository -/// that can be shared to non-members +/// Read access to a branch of a Public, Protected or Group store. +/// The overlay to join can be the outer or the inner, depending on what was offered in the link. +/// The difference between the two is that in the outer overlay, only one broker is contacted. +/// In the inner overlay, all the publisher's brokers are contacted, so subscription to the pub/sub is more reliable, less prone to outage. +/// This is not a durable link. If the topic has been refreshed, the pub/sub can no longer be subscribed to, +/// but TopicSyncReq will still work (answering the commits up until the moment the topic was refreshed) +/// and the optional heads will always be retrievable #[derive(Clone, Debug, Serialize, Deserialize)] -pub enum ObjectLink { - V0(ObjectLinkV0), +pub struct ReadBranchLinkV0 { + /// Repository ID + pub repo: RepoId, + + pub branch: BranchId, // must match the one in read_cap + + /// an optional list of heads that can be fetched in this branch + /// useful if a specific head is to be shared + pub heads: Vec, + + /// read capability for the branch + /// current (at the time of sharing the link) branch definition commit + pub read_cap: ReadCap, + + /// Current overlay link, used to join the overlay, most of the time, an outerOverlay is preferred + pub overlay: OverlayLink, + + /// Peer brokers to connect to + pub peers: Vec, } -/// Owned repository with private key +/// Link to a branch of a repository #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct RepoKeysV0 { - /// Repository private key - pub key: PrivKey, +pub enum ReadBranchLink { + V0(ReadBranchLinkV0), +} - /// Repository secret - pub secret: SymKey, +/// Obtains one or more objects of a repo (Commit, File) by their ID. +/// On an outerOverlay, the header is always emptied (no way to reconstruct the DAG of commits) except on public overlays or if a topicId is provided +/// If the intent is to share a whole DAG of commits at a definite CommitID/HEAD, then ReadBranchLink should be used instead (or PublicRepoLocator if public site) +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ObjectLinkV0 { + /// Repository ID: not used to make the request, but useful for commits, to know which repo they are from without needing to fetch and open the full DAG of commits.
also note that on outerOverlay of non public stores, the DAG is not accessible) + /// note that it could be omitted, especially if the objects are files, as files are content-addressable and belong to an overlay but not to a specific repo or topic. + pub repo: Option, + + /// An optional topic that will be used to retrieve the Certificate of a commit, if needed + /// (topic has to be checked with the one inside the commit. the one here might be wrong. it is provided here as an optimization) + /// or can be used to help with BlockSearchTopic. + /// If the topic is provided, a TopicSyncReq can be performed, and the causal past of the commit will appear (by repeated tries while narrowing down on the ancestors), + /// hence defeating the "emptied header" protection + pub topic: Option, + + pub objects: Vec, + + /// Overlay to join + pub overlay: OverlayLink, - /// Peers to connect to + /// Peer brokers to connect to pub peers: Vec, } -/// Owned repository with private key +/// Link to a specific commit, without its causal past #[derive(Clone, Debug, Serialize, Deserialize)] -pub enum RepoKeys { - V0(RepoKeysV0), +pub enum ObjectLink { + V0(ObjectLinkV0), } +/// NextGraph Link V0 +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum NgLinkV0 { + Repo(RepoLink), + PublicRepo(PublicRepoLocator), + Branch(ReadBranchLink), + Object(ObjectLink), +} + +/// NextGraph Link +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum NgLink { + V0(NgLinkV0), +} + +// TODO: PermaLinks and PostInbox (and ExtRequests) + #[cfg(test)] mod test { diff --git a/p2p-repo/Cargo.toml b/p2p-repo/Cargo.toml index d01b36a..eb42360 100644 --- a/p2p-repo/Cargo.toml +++ b/p2p-repo/Cargo.toml @@ -27,8 +27,10 @@ web-time = "0.2.0" wasm-bindgen = "0.2" slice_as_array = "1.1.0" curve25519-dalek = "3.2.0" +threshold_crypto = "0.4.0" zeroize = { version = "1.6.0", features = ["zeroize_derive"] } time = { version= "0.3.23", features = ["formatting"] } +once_cell = "1.17.1" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] debug_print = "1.0.0"
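The threshold_crypto dependency added above provides BLS threshold signatures. A self-contained sketch of the t-of-n flow the crate offers (illustration only; how this patch actually wires it into commit signatures is not shown here, and a rand 0.7 `thread_rng` is assumed to be available):

```rust
use threshold_crypto::SecretKeySet;

fn main() {
    let mut rng = rand::thread_rng(); // rand 0.7, as pinned by threshold_crypto 0.4
    // threshold = 1: any 2 (threshold + 1) of the shares can produce a signature
    let sk_set = SecretKeySet::random(1, &mut rng);
    let pk_set = sk_set.public_keys();
    let msg = b"signed commit";
    // two members each sign with their own key share
    let shares: Vec<_> = (0..2)
        .map(|i| (i, sk_set.secret_key_share(i).sign(msg)))
        .collect();
    // anyone holding enough shares can combine them into one group signature
    let sig = pk_set
        .combine_signatures(shares.iter().map(|(i, s)| (*i, s)))
        .expect("not enough valid shares");
    assert!(pk_set.public_key().verify(&sig, msg));
}
```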
diff --git a/p2p-repo/src/block.rs b/p2p-repo/src/block.rs index b206b8b..43dca77 100644 --- a/p2p-repo/src/block.rs +++ b/p2p-repo/src/block.rs @@ -16,16 +16,18 @@ use crate::types::*; impl BlockV0 { pub fn new( children: Vec, - header_ref: Option, + mut header_ref: Option, content: Vec, key: Option, ) -> BlockV0 { - let (commit_header_id, commit_header_key) = header_ref.map_or((None, None), |obj_ref| { - (Some(obj_ref.id), Some(obj_ref.key)) - }); + let (commit_header, commit_header_key) = header_ref + .take() + .map_or((CommitHeaderObject::None, None), |obj_ref| { + (obj_ref.obj, Some(obj_ref.key)) + }); let bc = BlockContentV0 { children, - commit_header_id, + commit_header: commit_header, encrypted_content: content, }; let mut b = BlockV0 { @@ -65,12 +67,12 @@ impl BlockContent { } } - /// Get the header id - pub fn header_id(&self) -> &Option { - match self { - BlockContent::V0(bc) => &bc.commit_header_id, - } - } + // /// Get the header id + // pub fn header_id(&self) -> &Option { + // match self { + // BlockContent::V0(bc) => &bc.commit_header_id, + // } + // } /// Get the children pub fn children(&self) -> &Vec { @@ -83,7 +85,7 @@ impl BlockContent { impl Block { pub fn new( children: Vec, - header_ref: Option, + header_ref: Option, content: Vec, key: Option, ) -> Block { @@ -136,13 +138,20 @@ impl Block { } } - /// Get the header - pub fn header_ref(&self) -> Option { + /// Get the header reference + pub fn header_ref(&self) -> Option { match self { - Block::V0(b) => b.commit_header_key.as_ref().map(|key| ObjectRef { - key: key.clone(), - id: b.content.header_id().unwrap().clone(), - }), + Block::V0(b) => match b.commit_header_key.as_ref() { + Some(key) => match b.content.commit_header_obj() { + CommitHeaderObject::None => None, + _ => Some(CommitHeaderRef { + obj: b.content.commit_header_obj().clone(), + key: key.clone(), + }), + }, + + None => None, + }, } } diff --git a/p2p-repo/src/branch.rs b/p2p-repo/src/branch.rs index 5b4ae4e..465e779 100644 --- a/p2p-repo/src/branch.rs +++ b/p2p-repo/src/branch.rs @@ -24,17 +24,17 @@ impl BranchV0 { pub fn new( id: PubKey, repo: ObjectRef, - root_branch_def_id: ObjectId, + root_branch_readcap_id: ObjectId, topic_priv: PrivKey, metadata: Vec, ) -> BranchV0 { let topic_privkey: Vec = vec![]; - //TODO: topic_privkey is topic_priv encrypted with the repo_secret, branch_id, topic_id + //TODO: topic_privkey is topic_priv encrypted with RepoWriteCapSecret, TopicId, BranchId let topic = topic_priv.to_pub(); BranchV0 { id, repo, - root_branch_def_id, + root_branch_readcap_id, topic, topic_privkey, metadata, @@ -46,14 +46,14 @@ impl Branch { pub fn new( id: PubKey, repo: ObjectRef, - root_branch_def_id: ObjectId, + root_branch_readcap_id: ObjectId, topic_priv: PrivKey, metadata: Vec, ) -> Branch { Branch::V0(BranchV0::new( id, repo, - root_branch_def_id, + root_branch_readcap_id, topic_priv, metadata, )) } @@ -81,8 +81,8 @@ impl Branch { visited: &mut HashSet, missing: &mut HashSet, ) -> Result { - //log_debug!(">>> load_branch: {}", cobj.id()); let id = cobj.id(); + //log_debug!(">>> load_branch: {}", id); // root has no acks let is_root = cobj.is_root(); @@ -180,17 +180,23 @@ mod test { pub fn test_branch() { fn add_obj( content: ObjectContentV0, - header: Option, - repo_pubkey: PubKey, - repo_secret: SymKey, + header: Option, + store_pubkey: &StoreRepo, + store_secret: &ReadCapSecret, store: &Box, ) -> ObjectRef { let max_object_size = 4000; - let obj = Object::new(content, header, max_object_size, repo_pubkey, repo_secret); + let mut obj = Object::new( + ObjectContent::V0(content), + header, + max_object_size, + store_pubkey, + store_secret, + ); log_debug!(">>> add_obj"); log_debug!(" id: {:?}", obj.id()); log_debug!(" header: {:?}", obj.header()); - obj.save(store).unwrap(); + obj.save_in_test(store).unwrap(); obj.reference().unwrap() } fn add_commit( branch: BranchId, author_privkey: PrivKey, author_pubkey: PubKey, seq: u64, deps: Vec, acks: Vec, body_ref: ObjectRef, - repo_pubkey: PubKey, - repo_secret: SymKey, + store_pubkey: &StoreRepo, + store_secret: &ReadCapSecret, store: &Box, ) -> ObjectRef { - let header = CommitHeaderV0::new_with_deps_and_acks( + let header = CommitHeader::new_with_deps_and_acks( deps.iter().map(|r| r.id).collect(), acks.iter().map(|r| r.id).collect(), ); @@ -236,45 +242,45 @@ .unwrap(); //log_debug!("commit: {:?}", commit); add_obj( - ObjectContentV0::Commit(commit), + ObjectContentV0::Commit(Commit::V0(commit)), header, - repo_pubkey, - repo_secret, + store_pubkey, + store_secret, store, ) } fn add_body_branch( branch: BranchV0, - repo_pubkey: PubKey, - repo_secret: SymKey, + store_pubkey: &StoreRepo, + store_secret: &ReadCapSecret, store: &Box, ) -> ObjectRef { - let body = CommitBodyV0::Branch(branch); + let body: CommitBodyV0 = CommitBodyV0::Branch(Branch::V0(branch)); //log_debug!("body: {:?}", body); add_obj( - ObjectContentV0::CommitBody(body), + ObjectContentV0::CommitBody(CommitBody::V0(body)), None, - repo_pubkey, - repo_secret, + store_pubkey, + store_secret, store, ) } fn add_body_trans( - header: Option, - repo_pubkey:
PubKey, - repo_secret: SymKey, + header: Option, + store_pubkey: &StoreRepo, + store_secret: &ReadCapSecret, store: &Box, ) -> ObjectRef { let content = [7u8; 777].to_vec(); - let body = CommitBodyV0::Transaction(content); + let body = CommitBodyV0::AsyncTransaction(Transaction::V0(content)); //log_debug!("body: {:?}", body); add_obj( - ObjectContentV0::CommitBody(body), + ObjectContentV0::CommitBody(CommitBody::V0(body)), header, - repo_pubkey, - repo_secret, + store_pubkey, + store_secret, store, ) } @@ -298,6 +304,7 @@ mod test { let repo_privkey = PrivKey::Ed25519PrivKey(repo_keypair.secret.to_bytes()); let repo_pubkey = PubKey::Ed25519PubKey(repo_keypair.public.to_bytes()); let repo_secret = SymKey::ChaCha20Key([9; 32]); + let store_repo = StoreRepo::V0(StoreRepoV0::PublicStore(repo_pubkey)); // branch @@ -314,8 +321,8 @@ mod test { let repo = Repo::new_with_member( &repo_pubkey, - member_pubkey, - &[Permission::Transaction], + &member_pubkey, + &[PermissionV0::WriteAsync], store, ); @@ -352,14 +359,10 @@ mod test { // commit bodies - let branch_body = add_body_branch( - branch.clone(), - repo_pubkey.clone(), - repo_secret.clone(), - repo.get_store(), - ); + let branch_body = + add_body_branch(branch.clone(), &store_repo, &repo_secret, repo.get_store()); - let trans_body = add_body_trans(None, repo_pubkey, repo_secret.clone(), repo.get_store()); + let trans_body = add_body_trans(None, &store_repo, &repo_secret, repo.get_store()); // create & add commits to store @@ -372,8 +375,8 @@ mod test { vec![], vec![], branch_body.clone(), - repo_pubkey, - repo_secret.clone(), + &store_repo, + &repo_secret, repo.get_store(), ); @@ -386,8 +389,8 @@ mod test { vec![br.clone()], vec![], trans_body.clone(), - repo_pubkey, - repo_secret.clone(), + &store_repo, + &repo_secret, repo.get_store(), ); @@ -400,8 +403,8 @@ mod test { vec![br.clone()], vec![], trans_body.clone(), - repo_pubkey, - repo_secret.clone(), + &store_repo, + &repo_secret, repo.get_store(), ); @@ -428,8 +431,8 @@ mod test { vec![t2.clone()], vec![t1.clone()], trans_body.clone(), - repo_pubkey, - repo_secret.clone(), + &store_repo, + &repo_secret, repo.get_store(), ); @@ -442,8 +445,8 @@ mod test { vec![t1.clone(), t2.clone()], vec![t4.clone()], trans_body.clone(), - repo_pubkey, - repo_secret.clone(), + &store_repo, + &repo_secret, repo.get_store(), ); @@ -456,8 +459,8 @@ mod test { vec![t4.clone()], vec![], trans_body.clone(), - repo_pubkey, - repo_secret.clone(), + &store_repo, + &repo_secret, repo.get_store(), ); @@ -470,13 +473,13 @@ mod test { vec![t4.clone()], vec![], trans_body.clone(), - repo_pubkey, - repo_secret.clone(), + &store_repo, + &repo_secret, repo.get_store(), ); - let c7 = Commit::load(a7.clone(), repo.get_store()).unwrap(); - c7.verify(&repo, repo.get_store()).unwrap(); + let mut c7 = Commit::load(a7.clone(), repo.get_store(), true).unwrap(); + c7.verify(&repo).unwrap(); let mut filter = Filter::new(FilterBuilder::new(10, 0.01)); for commit_ref in [br, t1, t2, t5.clone(), a6.clone()] { diff --git a/p2p-repo/src/commit.rs b/p2p-repo/src/commit.rs index 25e9800..864a8c1 100644 --- a/p2p-repo/src/commit.rs +++ b/p2p-repo/src/commit.rs @@ -12,6 +12,7 @@ //! 
Commit use ed25519_dalek::*; +use once_cell::sync::OnceCell; use crate::errors::NgError; use crate::log::*; @@ -26,9 +27,13 @@ use std::iter::FromIterator; pub enum CommitLoadError { MissingBlocks(Vec), ObjectParseError, - DeserializeError, + NotACommitError, + NotACommitBodyError, CannotBeAtRootOfBranch, MustBeAtRootOfBranch, + BodyLoadError, + HeaderLoadError, + BodyTypeMismatch, } #[derive(Debug)] @@ -56,9 +61,10 @@ impl CommitV0 { metadata: Vec, body: ObjectRef, ) -> Result { - let headers = CommitHeaderV0::new_with(deps, ndeps, acks, nacks, refs, nrefs); + let headers = CommitHeader::new_with(deps, ndeps, acks, nacks, refs, nrefs); let content = CommitContentV0 { - author: author_pubkey, + perms: vec![], + author: (&author_pubkey).into(), seq, branch, header_keys: headers.1, @@ -81,11 +87,12 @@ impl CommitV0 { ss[1].copy_from_slice(it.next().unwrap()); let sig = Sig::Ed25519Sig(ss); Ok(CommitV0 { - content, + content: CommitContent::V0(content), sig, id: None, key: None, header: headers.0, + body: OnceCell::new(), }) } } @@ -125,10 +132,38 @@ impl Commit { .map(|c| Commit::V0(c)) } + pub fn save( + &mut self, + block_size: usize, + store_pubkey: &StoreRepo, + store_secret: &ReadCapSecret, + store: &Box, + ) -> Result { + match self { + Commit::V0(v0) => { + let mut obj = Object::new( + ObjectContent::V0(ObjectContentV0::Commit(Commit::V0(v0.clone()))), + v0.header.clone(), + block_size, + store_pubkey, + store_secret, + ); + obj.save(store)?; + if let Some(h) = &mut v0.header { + h.set_id(obj.header().as_ref().unwrap().id().unwrap()); + } + self.set_id(obj.get_and_save_id()); + self.set_key(obj.key().unwrap()); + Ok(obj.reference().unwrap()) + } + } + } + /// Load commit from store pub fn load( commit_ref: ObjectRef, store: &Box, + with_body: bool, ) -> Result { let (id, key) = (commit_ref.id, commit_ref.key); match Object::load(id, Some(key.clone()), store) { @@ -138,14 +173,17 @@ impl Commit { .map_err(|_e| CommitLoadError::ObjectParseError)?; let mut commit = match content { ObjectContent::V0(ObjectContentV0::Commit(c)) => c, - _ => return Err(CommitLoadError::DeserializeError), + _ => return Err(CommitLoadError::NotACommitError), }; - commit.id = Some(id); - commit.key = Some(key.clone()); - if let Some(CommitHeader::V0(header_v0)) = obj.header() { - commit.header = Some(header_v0.clone()); + commit.set_id(id); + commit.set_key(key.clone()); + commit.set_header(obj.header().clone()); + + if with_body { + commit.load_body(store)?; } - Ok(Commit::V0(commit)) + + Ok(commit) } Err(ObjectParseError::MissingBlocks(missing)) => { Err(CommitLoadError::MissingBlocks(missing)) @@ -158,8 +196,10 @@ impl Commit { pub fn load_body( &self, store: &Box, - ) -> Result { - // TODO store body in CommitV0 (with #[serde(skip)]) as a cache for subsequent calls to load_body + ) -> Result<&CommitBody, CommitLoadError> { + if self.body().is_some() { + return Ok(self.body().unwrap()); + } let content = self.content_v0(); let (id, key) = (content.body.id, content.body.key.clone()); let obj = Object::load(id.clone(), Some(key.clone()), store).map_err(|e| match e { @@ -170,39 +210,69 @@ impl Commit { .content() .map_err(|_e| CommitLoadError::ObjectParseError)?; match content { - ObjectContent::V0(ObjectContentV0::CommitBody(body)) => Ok(CommitBody::V0(body)), - _ => Err(CommitLoadError::DeserializeError), + ObjectContent::V0(ObjectContentV0::CommitBody(body)) => { + self.set_body(body); + Ok(self.body().unwrap()) + } + _ => Err(CommitLoadError::NotACommitBodyError), } } - /// Get ID of parent `Object` + fn 
set_body(&self, body: CommitBody) { + match self { + Commit::V0(c) => { + c.body.set(body).unwrap(); + } + } + } + + /// Get ID of enclosing `Object`, + /// only available if the Commit was loaded from store or saved pub fn id(&self) -> Option { match self { Commit::V0(c) => c.id, } } - /// Set ID of parent `Object` - pub fn set_id(&mut self, id: ObjectId) { + /// Get ID of header `Object` + pub fn header_id(&self) -> &Option { + match self { + Commit::V0(CommitV0 { + header: Some(ch), .. + }) => ch.id(), + _ => &None, + } + } + + /// Set ID of enclosing `Object` + fn set_id(&mut self, id: ObjectId) { match self { Commit::V0(c) => c.id = Some(id), } } - /// Get key of parent `Object` + /// Get key of enclosing `Object`, + /// only available if the Commit was loaded from store or saved pub fn key(&self) -> Option { match self { Commit::V0(c) => c.key.clone(), } } - /// Set key of parent `Object` - pub fn set_key(&mut self, key: SymKey) { + /// Set key of enclosing `Object` + fn set_key(&mut self, key: SymKey) { match self { Commit::V0(c) => c.key = Some(key), } } + /// Set header of enclosing `Object` + fn set_header(&mut self, header: Option) { + match self { + Commit::V0(c) => c.header = header, + } + } + /// Get commit signature pub fn sig(&self) -> &Sig { match self { @@ -210,32 +280,90 @@ impl Commit { } } + /// Get commit header + pub fn header(&self) -> &Option { + match self { + Commit::V0(c) => &c.header, + } + } + /// Get commit content V0 pub fn content_v0(&self) -> &CommitContentV0 { match self { - Commit::V0(c) => &c.content, + Commit::V0(CommitV0 { + content: CommitContent::V0(c), + .. + }) => c, + } + } + + pub fn body(&self) -> Option<&CommitBody> { + match self { + Commit::V0(c) => c.body.get(), + } + } + + pub fn owners_signature_required( + &self, + store: &Box, + ) -> Result { + match self.load_body(store)? { + CommitBody::V0(CommitBodyV0::UpdateRootBranch(new_root)) => { + // load deps (the previous RootBranch commit) + let deps = self.deps(); + if deps.len() != 1 { + Err(CommitLoadError::HeaderLoadError) + } else { + let previous_rootbranch_commit = Commit::load(deps[0].clone(), store, true)?; + let previous_rootbranch = previous_rootbranch_commit + .body() + .unwrap() + .root_branch_commit()?; + if previous_rootbranch.owners() != new_root.owners() { + Ok(true) + } else { + Ok(false) + } + } + } + CommitBody::V0(CommitBodyV0::RootBranch(_)) => { + let deps = self.deps(); + let acks = self.acks(); + if deps.len() == 0 && acks.len() == 1 { + // we check that the ACK is the repository singleton commit. In this case, we are dealing with the first RootBranch commit, which is allowed to have no deps. + let causal_past = Commit::load(acks[0].clone(), store, true)?; + if causal_past.body().unwrap().is_repository_singleton_commit() { + return Ok(false); + } + } + Err(CommitLoadError::HeaderLoadError) + } + _ => Ok(false), } } /// This commit is the first one in the branch (doesn't have any ACKs nor Nacks) pub fn is_root_commit_of_branch(&self) -> bool { match self { - Commit::V0(c) => match &c.content.header_keys { - Some(hk) => hk.acks.is_empty() && hk.nacks.is_empty(), + Commit::V0(CommitV0 { + content: CommitContent::V0(c), + ..
+ }) => match &c.header_keys { + Some(CommitHeaderKeys::V0(hk)) => hk.acks.is_empty() && hk.nacks.is_empty(), None => true, }, _ => unimplemented!(), } } - /// Get acks + /// Get acks (that have both an ID in the header and a key in the header_keys) pub fn acks(&self) -> Vec { let mut res: Vec = vec![]; match self { Commit::V0(c) => match &c.header { - Some(header_v0) => match &c.content.header_keys { - Some(hk) => { - for ack in header_v0.acks.iter().zip(hk.acks.iter()) { + Some(CommitHeader::V0(header_v0)) => match &c.content.header_keys() { + Some(CommitHeaderKeys::V0(hk_v0)) => { + for ack in header_v0.acks.iter().zip(hk_v0.acks.iter()) { res.push(ack.into()); } } @@ -248,14 +376,14 @@ impl Commit { res } - /// Get deps + /// Get deps (that have both an ID in the header and a key in the header_keys) pub fn deps(&self) -> Vec { let mut res: Vec = vec![]; match self { Commit::V0(c) => match &c.header { - Some(header_v0) => match &c.content.header_keys { - Some(hk) => { - for dep in header_v0.deps.iter().zip(hk.deps.iter()) { + Some(CommitHeader::V0(header_v0)) => match &c.content.header_keys() { + Some(CommitHeaderKeys::V0(hk_v0)) => { + for dep in header_v0.deps.iter().zip(hk_v0.deps.iter()) { res.push(dep.into()); } } @@ -268,23 +396,22 @@ impl Commit { res } - /// Get all commits that are in the direct causal past of the commit (`deps`, `acks`, `nacks`, `ndeps`) + /// Get all commits that are in the direct causal past of the commit (`deps`, `acks`, `nacks`) + /// Only returns ObjectRefs that have both an ID from the header and a KEY from the header_keys (it couldn't be otherwise) pub fn direct_causal_past(&self) -> Vec { let mut res: Vec = vec![]; match self { - Commit::V0(c) => match (&c.header, &c.content.header_keys) { - (Some(header_v0), Some(hk)) => { - for ack in header_v0.acks.iter().zip(hk.acks.iter()) { + Commit::V0(c) => match (&c.header, &c.content.header_keys()) { + (Some(CommitHeader::V0(header_v0)), Some(CommitHeaderKeys::V0(hk_v0))) => { + for ack in header_v0.acks.iter().zip(hk_v0.acks.iter()) { res.push(ack.into()); } - for nack in header_v0.nacks.iter().zip(hk.nacks.iter()) { + for nack in header_v0.nacks.iter().zip(hk_v0.nacks.iter()) { res.push(nack.into()); } - for dep in header_v0.deps.iter().zip(hk.deps.iter()) { + for dep in header_v0.deps.iter().zip(hk_v0.deps.iter()) { res.push(dep.into()); - } - for ndep in header_v0.ndeps.iter().zip(hk.ndeps.iter()) { - res.push(ndep.into()); + //TODO: deal with deps that are also in acks; they shouldn't be added twice } } _ => {} @@ -297,7 +424,10 @@ impl Commit { /// Get seq pub fn seq(&self) -> u64 { match self { - Commit::V0(c) => c.content.seq, + Commit::V0(CommitV0 { + content: CommitContent::V0(c), + ..
+ }) => c.seq, } } @@ -307,16 +437,18 @@ impl Commit { Commit::V0(c) => c, }; let content_ser = serde_bare::to_vec(&c.content).unwrap(); - let pubkey = match c.content.author { - PubKey::Ed25519PubKey(pk) => pk, - _ => panic!("author cannot have a Montgomery key"), - }; - let pk = PublicKey::from_bytes(&pubkey)?; - let sig_bytes = match c.sig { - Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(), - }; - let sig = Signature::from_bytes(&sig_bytes)?; - pk.verify_strict(&content_ser, &sig) + unimplemented!(); + // FIXME : lookup author in member's list + // let pubkey = match c.content.author() { + // PubKey::Ed25519PubKey(pk) => pk, + // _ => panic!("author cannot have a Montgomery key"), + // }; + // let pk = PublicKey::from_bytes(pubkey)?; + // let sig_bytes = match c.sig { + // Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(), + // }; + // let sig = Signature::from_bytes(&sig_bytes)?; + // pk.verify_strict(&content_ser, &sig) } /// Verify commit permissions @@ -325,7 +457,9 @@ impl Commit { .map_err(|_| CommitVerifyError::PermissionDenied) } - /// Verify if the commit's `body`, `header` and direct_causal_past, and recursively all their refs are available in the `store` + /// Verify if the commit's `body` and its direct_causal_past, and recursively all their refs are available in the `store` + /// returns a list of all the ObjectIds that have been visited (only commits in the DAG) + /// or a list of missing blocks pub fn verify_full_object_refs_of_branch_at_commit( &self, store: &Box, @@ -342,49 +476,53 @@ impl Commit { ) -> Result<(), CommitLoadError> { //log_debug!(">>> load_branch: #{}", commit.seq()); - // FIXME: what about this comment? seems like a Commit always has an id - // the self of verify_full_object_refs_of_branch_at_commit() may not have an ID set, - // but the commits loaded from store should have it + // if the self of verify_full_object_refs_of_branch_at_commit() has not been saved yet, then it doesn't have an ID match commit.id() { Some(id) => { if visited.contains(&id) { return Ok(()); } visited.insert(id); + // not adding the ObjectId of the header of this commit as it is not part of the DAG (neither is the CommitBody added to visited) + // // commit.header_id().map(|hid| visited.insert(hid)); + } + None => { + if !visited.is_empty() { + // we are not at the beginning (meaning, the self/the commit object) so this is a panic error as all causal + // past commits have been loaded from store and should have an id + panic!("A Commit in the causal past doesn't have an ID"); + } } - None => panic!("Commit without an ID"), } // load body & check if it's the Branch root commit match commit.load_body(store) { - Ok(body) => { - if commit.is_root_commit_of_branch() { - if body.must_be_root_commit_in_branch() { - Ok(()) - } else { - Err(CommitLoadError::CannotBeAtRootOfBranch) - } - } else { - if body.must_be_root_commit_in_branch() { - Err(CommitLoadError::MustBeAtRootOfBranch) - } else { - Ok(()) - } - } - } + Ok(_) => Ok(()), Err(CommitLoadError::MissingBlocks(m)) => { // The commit body is missing. 
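// [editor's note] A minimal, self-contained sketch of the visited-set walk that
// verify_full_object_refs_of_branch_at_commit() performs: each commit is visited at
// most once, and its direct causal past is followed recursively. `ToyCommit` and the
// HashMap store are hypothetical stand-ins for Commit, direct_causal_past() and the
// RepoStore; only the traversal pattern comes from the patch.

use std::collections::{HashMap, HashSet};

struct ToyCommit {
    // stand-in for the deps/acks/nacks ObjectRefs of the real CommitHeader
    past: Vec<u32>,
}

fn walk(id: u32, store: &HashMap<u32, ToyCommit>, visited: &mut HashSet<u32>) {
    if !visited.insert(id) {
        return; // already seen: the causal past is a DAG and can contain diamonds
    }
    if let Some(commit) = store.get(&id) {
        for parent in &commit.past {
            walk(*parent, store, visited);
        }
    }
    // the real code collects unresolvable references into `missing` instead of ignoring them
}

fn main() {
    let mut store = HashMap::new();
    store.insert(2, ToyCommit { past: vec![] });
    store.insert(1, ToyCommit { past: vec![2, 2] }); // duplicate edge is deduplicated
    let mut visited = HashSet::new();
    walk(1, &store, &mut visited);
    assert_eq!(visited.len(), 2);
}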
missing.extend(m); - Ok(()) + Err(CommitLoadError::BodyLoadError) } Err(e) => Err(e), }?; + let body = commit.body().unwrap(); + visited.insert(commit.content_v0().body.id); + if commit.is_root_commit_of_branch() { + if !body.must_be_root_commit_in_branch() { + return Err(CommitLoadError::CannotBeAtRootOfBranch); + } + } else { + if body.must_be_root_commit_in_branch() { + return Err(CommitLoadError::MustBeAtRootOfBranch); + } + } + // load direct causal past for blockref in commit.direct_causal_past() { - match Commit::load(blockref, store) { - Ok(c) => { - load_direct_object_refs(&c, store, visited, missing)?; + match Commit::load(blockref, store, true) { + Ok(mut c) => { + load_direct_object_refs(&mut c, store, visited, missing)?; } Err(CommitLoadError::MissingBlocks(m)) => { missing.extend(m); @@ -407,11 +545,7 @@ impl Commit { } /// Verify signature, permissions, and full causal past - pub fn verify( - &self, - repo: &Repo, - store: &Box, - ) -> Result<(), CommitVerifyError> { + pub fn verify(&self, repo: &Repo) -> Result<(), CommitVerifyError> { self.verify_sig() .map_err(|_e| CommitVerifyError::InvalidSignature)?; self.verify_perm(repo)?; @@ -421,6 +555,218 @@ impl Commit { } } +impl PermissionV0 { + /// the kind of permissions that can be added and removed with AddWritePermission and RemoveWritePermission permissions respectively + pub fn is_write_permission(&self) -> bool { + match self { + Self::WriteAsync | Self::WriteSync | Self::RefreshWriteCap => true, + _ => false, + } + } + + pub fn is_delegated_by_admin(&self) -> bool { + self.is_write_permission() + || match self { + Self::AddReadMember + | Self::RemoveMember + | Self::AddWritePermission + | Self::RemoveWritePermission + | Self::Compact + | Self::AddBranch + | Self::RemoveBranch + | Self::ChangeName + | Self::RefreshReadCap => true, + _ => false, + } + } + + pub fn is_delegated_by_owner(&self) -> bool { + self.is_delegated_by_admin() + || match self { + Self::ChangeQuorum | Self::Admin | Self::ChangeMainBranch => true, + _ => false, + } + } +} + +impl CommitBody { + pub fn root_branch_commit(&self) -> Result<&RootBranch, CommitLoadError> { + match self { + Self::V0(v0) => match v0 { + CommitBodyV0::UpdateRootBranch(rb) | CommitBodyV0::RootBranch(rb) => Ok(rb), + _ => Err(CommitLoadError::BodyTypeMismatch), + }, + } + } + + pub fn is_repository_singleton_commit(&self) -> bool { + match self { + Self::V0(v0) => match v0 { + CommitBodyV0::Repository(_) => true, + _ => false, + }, + } + } + pub fn must_be_root_commit_in_branch(&self) -> bool { + match self { + Self::V0(v0) => match v0 { + CommitBodyV0::Repository(_) => true, + CommitBodyV0::Branch(_) => true, + _ => false, + }, + } + } + + pub fn on_root_branch(&self) -> bool { + match self { + Self::V0(v0) => match v0 { + CommitBodyV0::Repository(_) => true, + CommitBodyV0::RootBranch(_) => true, + CommitBodyV0::UpdateRootBranch(_) => true, + CommitBodyV0::ChangeMainBranch(_) => true, + CommitBodyV0::AddBranch(_) => true, + CommitBodyV0::RemoveBranch(_) => true, + CommitBodyV0::AddMember(_) => true, + CommitBodyV0::RemoveMember(_) => true, + CommitBodyV0::AddPermission(_) => true, + CommitBodyV0::RemovePermission(_) => true, + CommitBodyV0::AddName(_) => true, + CommitBodyV0::RemoveName(_) => true, + //CommitBodyV0::Quorum(_) => true, + CommitBodyV0::RefreshReadCap(_) => true, + CommitBodyV0::RefreshWriteCap(_) => true, + CommitBodyV0::SyncSignature(_) => true, + _ => false, + }, + } + } + + pub fn on_transactional_branch(&self) -> bool { + match self { + Self::V0(v0) => 
match v0 { + CommitBodyV0::Branch(_) => true, + CommitBodyV0::UpdateBranch(_) => true, + CommitBodyV0::Snapshot(_) => true, + CommitBodyV0::AsyncTransaction(_) => true, + CommitBodyV0::SyncTransaction(_) => true, + CommitBodyV0::AddFile(_) => true, + CommitBodyV0::RemoveFile(_) => true, + CommitBodyV0::Compact(_) => true, + CommitBodyV0::AsyncSignature(_) => true, + CommitBodyV0::RefreshReadCap(_) => true, + CommitBodyV0::RefreshWriteCap(_) => true, + CommitBodyV0::SyncSignature(_) => true, + _ => false, + }, + } + } + + pub fn total_order_required(&self) -> bool { + match self { + Self::V0(v0) => match v0 { + CommitBodyV0::UpdateRootBranch(_) => true, + CommitBodyV0::UpdateBranch(_) => true, + CommitBodyV0::ChangeMainBranch(_) => true, + CommitBodyV0::AddBranch(_) => true, + CommitBodyV0::RemoveBranch(_) => true, + CommitBodyV0::AddMember(_) => true, + CommitBodyV0::RemoveMember(_) => true, + CommitBodyV0::RemovePermission(_) => true, + //CommitBodyV0::Quorum(_) => true, + CommitBodyV0::Compact(_) => true, + CommitBodyV0::SyncTransaction(_) => true, // check Quorum::TotalOrder in CommitContent + CommitBodyV0::RefreshReadCap(_) => true, + CommitBodyV0::RefreshWriteCap(_) => true, + _ => false, + }, + } + } + pub fn required_permission(&self) -> HashSet { + let res: Vec; + res = match self { + Self::V0(v0) => match v0 { + CommitBodyV0::Repository(_) => vec![PermissionV0::Create], + CommitBodyV0::RootBranch(_) => vec![PermissionV0::Create], + CommitBodyV0::UpdateRootBranch(_) => vec![ + PermissionV0::ChangeQuorum, + PermissionV0::RefreshWriteCap, + PermissionV0::RefreshReadCap, + PermissionV0::RefreshOverlay, + ], + CommitBodyV0::AddMember(_) => { + vec![PermissionV0::Create, PermissionV0::AddReadMember] + } + CommitBodyV0::RemoveMember(_) => vec![PermissionV0::RemoveMember], + CommitBodyV0::AddPermission(addp) => { + let mut perms = vec![PermissionV0::Create]; + if addp.permission_v0().is_delegated_by_admin() { + perms.push(PermissionV0::Admin); + } + if addp.permission_v0().is_write_permission() { + perms.push(PermissionV0::AddWritePermission); + } + perms + } + CommitBodyV0::RemovePermission(remp) => { + let mut perms = vec![]; + if remp.permission_v0().is_delegated_by_admin() { + perms.push(PermissionV0::Admin); + } + if remp.permission_v0().is_write_permission() { + perms.push(PermissionV0::RemoveWritePermission); + } + perms + } + CommitBodyV0::AddBranch(_) => vec![ + PermissionV0::Create, + PermissionV0::AddBranch, + PermissionV0::RefreshReadCap, + PermissionV0::RefreshWriteCap, + PermissionV0::RefreshOverlay, + ], + CommitBodyV0::RemoveBranch(_) => vec![PermissionV0::RemoveBranch], + CommitBodyV0::UpdateBranch(_) => { + vec![PermissionV0::RefreshReadCap, PermissionV0::RefreshWriteCap] + } + CommitBodyV0::AddName(_) => vec![PermissionV0::AddBranch, PermissionV0::ChangeName], + CommitBodyV0::RemoveName(_) => { + vec![PermissionV0::ChangeName, PermissionV0::RemoveBranch] + } + CommitBodyV0::Branch(_) => vec![PermissionV0::Create, PermissionV0::AddBranch], + CommitBodyV0::ChangeMainBranch(_) => { + vec![PermissionV0::Create, PermissionV0::ChangeMainBranch] + } + CommitBodyV0::Snapshot(_) => vec![PermissionV0::WriteAsync], + CommitBodyV0::Compact(_) => vec![PermissionV0::Compact], + CommitBodyV0::AsyncTransaction(_) => vec![PermissionV0::WriteAsync], + CommitBodyV0::AddFile(_) => vec![PermissionV0::WriteAsync, PermissionV0::WriteSync], + CommitBodyV0::RemoveFile(_) => { + vec![PermissionV0::WriteAsync, PermissionV0::WriteSync] + } + CommitBodyV0::SyncTransaction(_) => 
vec![PermissionV0::WriteSync], + CommitBodyV0::AsyncSignature(_) => vec![PermissionV0::WriteAsync], + CommitBodyV0::SyncSignature(_) => vec![ + PermissionV0::WriteSync, + PermissionV0::ChangeQuorum, + PermissionV0::RefreshWriteCap, + PermissionV0::RefreshReadCap, + PermissionV0::RefreshOverlay, + PermissionV0::ChangeMainBranch, + PermissionV0::AddBranch, + PermissionV0::RemoveBranch, + PermissionV0::AddReadMember, + PermissionV0::RemoveMember, + PermissionV0::RemoveWritePermission, + PermissionV0::Compact, + ], + CommitBodyV0::RefreshReadCap(_) => vec![PermissionV0::RefreshReadCap], + CommitBodyV0::RefreshWriteCap(_) => vec![PermissionV0::RefreshWriteCap], + }, + }; + HashSet::from_iter(res.iter().cloned()) + } +} + mod test { use std::collections::HashMap; @@ -483,8 +829,7 @@ mod test { let store = Box::new(HashMapRepoStore::new()); - let repo = - Repo::new_with_member(&pub_key, pub_key.clone(), &[Permission::Transaction], store); + let repo = Repo::new_with_member(&pub_key, &pub_key, &[PermissionV0::WriteAsync], store); //let body = CommitBody::Ack(Ack::V0()); //log_debug!("body: {:?}", body); @@ -511,7 +856,7 @@ mod test { Err(e) => panic!("Commit verify error: {:?}", e), } - match commit.verify(&repo, repo.get_store()) { + match commit.verify(&repo) { Ok(_) => panic!("Commit should not be Ok"), Err(CommitVerifyError::BodyLoadError(CommitLoadError::MissingBlocks(missing))) => { assert_eq!(missing.len(), 1); diff --git a/p2p-repo/src/object.rs b/p2p-repo/src/object.rs index 73a068b..e2e0af6 100644 --- a/p2p-repo/src/object.rs +++ b/p2p-repo/src/object.rs @@ -25,6 +25,8 @@ use crate::types::*; /// Size of a serialized empty Block const EMPTY_BLOCK_SIZE: usize = 12 + 1; +/// Max size of an embedded CommitHeader +const MAX_EMBEDDED_COMMIT_HEADER_SIZE: usize = 100; /// Size of a serialized BlockId const BLOCK_ID_SIZE: usize = 33; /// Size of serialized SymKey @@ -49,6 +51,12 @@ pub struct Object { /// Header header: Option, + + /// Blocks of the Header (nodes of the tree) + header_blocks: Vec, + + #[cfg(test)] + already_saved: bool, } /// Object parsing errors @@ -80,8 +88,11 @@ pub enum ObjectCopyError { } impl Object { - fn convergence_key(repo_pubkey: PubKey, repo_secret: SymKey) -> [u8; blake3::OUT_LEN] { - let key_material = match (repo_pubkey, repo_secret) { + fn convergence_key( + store_pubkey: &StoreRepo, + store_readcap_secret: &ReadCapSecret, + ) -> [u8; blake3::OUT_LEN] { + let key_material = match (*store_pubkey.repo_id(), store_readcap_secret.clone()) { (PubKey::Ed25519PubKey(pubkey), SymKey::ChaCha20Key(secret)) => { [pubkey, secret].concat() } @@ -94,7 +105,7 @@ impl Object { content: &[u8], conv_key: &[u8; blake3::OUT_LEN], children: Vec, - header_ref: Option, + header_ref: Option, ) -> Block { let key_hash = blake3::keyed_hash(conv_key, content); let nonce = [0u8; 12]; @@ -114,28 +125,39 @@ impl Object { fn make_header_v0( header: CommitHeaderV0, object_size: usize, - repo_pubkey: PubKey, - repo_secret: SymKey, - ) -> ObjectRef { + store: &StoreRepo, + store_secret: &ReadCapSecret, + ) -> (ObjectRef, Vec) { let header_obj = Object::new( - ObjectContentV0::CommitHeader(header), + ObjectContent::V0(ObjectContentV0::CommitHeader(CommitHeader::V0(header))), None, object_size, - repo_pubkey, - repo_secret, + store, + store_secret, ); let header_ref = ObjectRef { id: header_obj.id(), key: header_obj.key().unwrap(), }; - header_ref + (header_ref, header_obj.blocks) + } + + fn make_header( + header: CommitHeader, + object_size: usize, + store: &StoreRepo, + store_secret: 
&ReadCapSecret, + ) -> (ObjectRef, Vec) { + match header { + CommitHeader::V0(v0) => Self::make_header_v0(v0, object_size, store, store_secret), + } } /// Build tree from leaves, returns parent nodes fn make_tree( leaves: &[Block], conv_key: &ChaCha20Key, - header_ref: &Option, + mut header_ref: Option, arity: usize, ) -> Vec { let mut parents = vec![]; @@ -146,17 +168,17 @@ impl Object { let children = nodes.iter().map(|block| block.id()).collect(); let content = ChunkContentV0::InternalNode(keys); let content_ser = serde_bare::to_vec(&content).unwrap(); - let child_header = None; + //let child_header = None; let header = if parents.is_empty() && it.peek().is_none() { - header_ref + header_ref.take() } else { - &child_header + None }; parents.push(Self::make_block( content_ser.as_slice(), conv_key, children, - header.clone(), + header, )); } //log_debug!("parents += {}", parents.len()); @@ -178,14 +200,14 @@ impl Object { /// * `content`: Object content /// * `header`: CommitHeaderV0 : All references of the object /// * `block_size`: Desired block size for chunking content, rounded up to nearest valid block size - /// * `repo_pubkey`: Repository public key - /// * `repo_secret`: Repository secret + /// * `store`: store public key + /// * `store_secret`: store's read capability secret pub fn new( - content: ObjectContentV0, - header: Option, + content: ObjectContent, + mut header: Option, block_size: usize, - repo_pubkey: PubKey, - repo_secret: SymKey, + store: &StoreRepo, + store_secret: &ReadCapSecret, ) -> Object { // create blocks by chunking + encrypting content let valid_block_size = store_valid_value_size(block_size); @@ -193,16 +215,42 @@ impl Object { let data_chunk_size = valid_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA; let mut blocks: Vec = vec![]; - let conv_key = Self::convergence_key(repo_pubkey, repo_secret.clone()); - - let header_ref = header - .clone() - .map(|ch| Self::make_header_v0(ch, valid_block_size, repo_pubkey, repo_secret.clone())); + let conv_key = Self::convergence_key(store, store_secret); + + let (header_ref, header_blocks) = match &header { + None => (None, vec![]), + Some(h) => { + let res = Self::make_header(h.clone(), valid_block_size, store, store_secret); + if res.1.len() == 1 + && res.1[0].encrypted_content().len() < MAX_EMBEDDED_COMMIT_HEADER_SIZE + { + ( + Some(CommitHeaderRef { + obj: CommitHeaderObject::EncryptedContent( + res.1[0].encrypted_content().to_vec(), + ), + key: res.0.key, + }), + vec![], + ) + } else { + header.as_mut().unwrap().set_id(res.0.id); + ( + Some(CommitHeaderRef { + obj: CommitHeaderObject::Id(res.0.id), + key: res.0.key, + }), + res.1, + ) + } + } + }; let content_ser = serde_bare::to_vec(&content).unwrap(); if EMPTY_BLOCK_SIZE + DATA_VARINT_EXTRA + + MAX_EMBEDDED_COMMIT_HEADER_SIZE + BLOCK_ID_SIZE * header_ref.as_ref().map_or(0, |_| 1) + content_ser.len() <= valid_block_size @@ -234,13 +282,16 @@ impl Object { let arity: usize = (valid_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2 - MAX_HEADER_SIZE) / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE); - let mut parents = Self::make_tree(blocks.as_slice(), &conv_key, &header_ref, arity); + let mut parents = Self::make_tree(blocks.as_slice(), &conv_key, header_ref, arity); blocks.append(&mut parents); } Object { blocks, - header: header.map(|h| CommitHeader::V0(h)), + header, + header_blocks, + #[cfg(test)] + already_saved: false, } } @@ -292,25 +343,44 @@ impl Object { } let header = match root.header_ref() { - Some(header_ref) => { - let obj = Object::load(header_ref.id, 
Some(header_ref.key), store)?; - match obj.content()? { - ObjectContent::V0(ObjectContentV0::CommitHeader(commit_header)) => { - Some(CommitHeader::V0(commit_header)) + Some(header_ref) => match header_ref.obj { + CommitHeaderObject::None => panic!("shouldn't happen"), + CommitHeaderObject::Id(id) => { + let obj = Object::load(id, Some(header_ref.key.clone()), store)?; + match obj.content()? { + ObjectContent::V0(ObjectContentV0::CommitHeader(mut commit_header)) => { + commit_header.set_id(id); + (Some(commit_header), Some(obj.blocks)) + } + _ => return Err(ObjectParseError::InvalidHeader), } - _ => return Err(ObjectParseError::InvalidHeader), } - } - None => None, + CommitHeaderObject::EncryptedContent(content) => { + match serde_bare::from_slice(content.as_slice()) { + Ok(ObjectContent::V0(ObjectContentV0::CommitHeader(commit_header))) => { + (Some(commit_header), None) + } + Err(e) => return Err(ObjectParseError::InvalidHeader), + _ => return Err(ObjectParseError::InvalidHeader), + } + } + }, + None => (None, None), }; - Ok(Object { blocks, header }) + Ok(Object { + blocks, + header: header.0, + header_blocks: header.1.unwrap_or(vec![]), + #[cfg(test)] + already_saved: true, + }) } - /// Save blocks of the object in the store + /// Save blocks of the object and the blocks of the header object in the store pub fn save(&self, store: &Box) -> Result<(), StorageError> { let mut deduplicated: HashSet = HashSet::new(); - for block in &self.blocks { + for block in self.blocks.iter().chain(self.header_blocks.iter()) { let id = block.id(); if deduplicated.get(&id).is_none() { store.put(block)?; @@ -319,15 +389,30 @@ impl Object { } Ok(()) } + #[cfg(test)] + pub fn save_in_test( + &mut self, + store: &Box, + ) -> Result<(), StorageError> { + assert!(self.already_saved == false); + self.already_saved = true; + + self.save(store) + } /// Get the ID of the Object pub fn id(&self) -> ObjectId { - self.blocks.last().unwrap().id() + self.root_block().id() + } + + /// Get the ID of the Object and saves it + pub fn get_and_save_id(&mut self) -> ObjectId { + self.blocks.last_mut().unwrap().get_and_save_id() } /// Get the key for the Object pub fn key(&self) -> Option { - self.blocks.last().unwrap().key() + self.root_block().key() } /// Get an `ObjectRef` for the root object @@ -346,6 +431,8 @@ impl Object { self.header.as_ref().map_or(true, |h| h.is_root()) } + /// Get deps (that have an ID in the header, without checking if there is a key for it in the header_keys) + /// if there is no header, returns an empty vec pub fn deps(&self) -> Vec { match &self.header { Some(h) => h.deps(), @@ -353,6 +440,8 @@ impl Object { } } + /// Get acks (that have an ID in the header, without checking if there is a key for it in the header_keys) + /// if there is no header, returns an empty vec pub fn acks(&self) -> Vec { match &self.header { Some(h) => h.acks(), @@ -409,7 +498,7 @@ impl Object { match block { Block::V0(b) => { - // decrypt content + // decrypt content in place (this is why we have to clone first) let mut content_dec = b.content.encrypted_content().clone(); match key { SymKey::ChaCha20Key(key) => { @@ -450,6 +539,8 @@ impl Object { } ChunkContentV0::DataChunk(chunk) => { if leaves.is_some() { + //FIXME this part is never used (when leaves.is_some ?) 
+ //FIXME if it was used, we should probably try to remove the block.clone() let mut leaf = block.clone(); leaf.set_key(Some(key.clone())); let l = &mut **leaves.as_mut().unwrap(); @@ -482,21 +573,21 @@ impl Object { Ok(()) } - /// Parse the Object and return the leaf Blocks with decryption key set - pub fn leaves(&self) -> Result, ObjectParseError> { - let mut leaves: Vec = vec![]; - let parents = vec![(self.id(), self.key().unwrap())]; - match Self::collect_leaves( - &self.blocks, - &parents, - self.blocks.len() - 1, - &mut Some(&mut leaves), - &mut None, - ) { - Ok(_) => Ok(leaves), - Err(e) => Err(e), - } - } + // /// Parse the Object and return the leaf Blocks with decryption key set + // pub fn leaves(&self) -> Result, ObjectParseError> { + // let mut leaves: Vec = vec![]; + // let parents = vec![(self.id(), self.key().unwrap())]; + // match Self::collect_leaves( + // &self.blocks, + // &parents, + // self.blocks.len() - 1, + // &mut Some(&mut leaves), + // &mut None, + // ) { + // Ok(_) => Ok(leaves), + // Err(e) => Err(e), + // } + // } /// Parse the Object and return the decrypted content assembled from Blocks pub fn content(&self) -> Result { @@ -512,16 +603,13 @@ impl Object { &mut None, &mut Some(&mut obj_content), ) { - Ok(_) => { - let content: ObjectContentV0; - match serde_bare::from_slice(obj_content.as_slice()) { - Ok(c) => Ok(ObjectContent::V0(c)), - Err(e) => { - log_debug!("Object deserialize error: {}", e); - Err(ObjectParseError::ObjectDeserializeError) - } + Ok(_) => match serde_bare::from_slice(obj_content.as_slice()) { + Ok(c) => Ok(c), + Err(e) => { + log_debug!("Object deserialize error: {}", e); + Err(ObjectParseError::ObjectDeserializeError) } - } + }, Err(e) => Err(e), } } @@ -572,20 +660,21 @@ mod test { .read_to_end(&mut img_buffer) .expect("read of test.jpg"); - let file = FileV0 { + let file = File::V0(FileV0 { content_type: "image/jpeg".into(), metadata: vec![], content: img_buffer, - }; - let content = ObjectContentV0::File(file); + }); + let content = ObjectContent::V0(ObjectContentV0::File(file)); let deps: Vec = vec![Digest::Blake3Digest32([9; 32])]; let max_object_size = store_max_value_size(); - let repo_secret = SymKey::ChaCha20Key([0; 32]); - let repo_pubkey = PubKey::Ed25519PubKey([1; 32]); + let store_secret = SymKey::ChaCha20Key([0; 32]); + let store_pubkey = PubKey::Ed25519PubKey([1; 32]); + let store_repo = StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey)); - let obj = Object::new(content, None, max_object_size, repo_pubkey, repo_secret); + let obj = Object::new(content, None, max_object_size, &store_repo, &store_secret); log_debug!("obj.id: {:?}", obj.id()); log_debug!("obj.key: {:?}", obj.key()); @@ -607,27 +696,28 @@ mod test { /// Test tree API #[test] pub fn test_object() { - let file = FileV0 { + let file = File::V0(FileV0 { content_type: "file/test".into(), metadata: Vec::from("some meta data here"), content: [(0..255).collect::>().as_slice(); 320].concat(), - }; - let content = ObjectContentV0::File(file); + }); + let content = ObjectContent::V0(ObjectContentV0::File(file)); let deps = vec![Digest::Blake3Digest32([9; 32])]; - let header = CommitHeaderV0::new_with_deps(deps.clone()); + let header = CommitHeader::new_with_deps(deps.clone()); let exp = Some(2u32.pow(31)); let max_object_size = 0; - let repo_secret = SymKey::ChaCha20Key([0; 32]); - let repo_pubkey = PubKey::Ed25519PubKey([1; 32]); + let store_secret = SymKey::ChaCha20Key([0; 32]); + let store_pubkey = PubKey::Ed25519PubKey([1; 32]); + let store_repo = 
StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey)); - let obj = Object::new( + let mut obj = Object::new( content.clone(), header, max_object_size, - repo_pubkey, - repo_secret.clone(), + &store_repo, + &store_secret, ); log_debug!("obj.id: {:?}", obj.id()); @@ -645,14 +735,14 @@ mod test { assert_eq!(*obj.deps(), deps); match obj.content() { - Ok(ObjectContent::V0(cnt)) => { + Ok(cnt) => { assert_eq!(content, cnt); } Err(e) => panic!("Object parse error: {:?}", e), } let store = Box::new(HashMapRepoStore::new()); - obj.save(&store).expect("Object save error"); + obj.save_in_test(&store).expect("Object save error"); let obj2 = Object::load(obj.id(), obj.key(), &store).unwrap(); @@ -669,7 +759,7 @@ mod test { assert_eq!(*obj2.deps(), deps); - match obj2.content_v0() { + match obj2.content() { Ok(cnt) => { assert_eq!(content, cnt); } @@ -702,11 +792,11 @@ mod test { pub fn test_depth_1() { let deps: Vec = vec![Digest::Blake3Digest32([9; 32])]; - let empty_file = ObjectContentV0::File(FileV0 { + let empty_file = ObjectContent::V0(ObjectContentV0::File(File::V0(FileV0 { content_type: "".into(), metadata: vec![], content: vec![], - }); + }))); let empty_file_ser = serde_bare::to_vec(&empty_file).unwrap(); log_debug!("empty file size: {}", empty_file_ser.len()); @@ -718,26 +808,27 @@ mod test { - DATA_VARINT_EXTRA; log_debug!("file size: {}", size); - let content = ObjectContentV0::File(FileV0 { + let content = ObjectContent::V0(ObjectContentV0::File(File::V0(FileV0 { content_type: "".into(), metadata: vec![], content: vec![99; size], - }); + }))); let content_ser = serde_bare::to_vec(&content).unwrap(); log_debug!("content len: {}", content_ser.len()); let expiry = Some(2u32.pow(31)); let max_object_size = store_max_value_size(); - let repo_secret = SymKey::ChaCha20Key([0; 32]); - let repo_pubkey = PubKey::Ed25519PubKey([1; 32]); + let store_secret = SymKey::ChaCha20Key([0; 32]); + let store_pubkey = PubKey::Ed25519PubKey([1; 32]); + let store_repo = StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey)); let object = Object::new( content, - CommitHeaderV0::new_with_deps(deps), + CommitHeader::new_with_deps(deps), max_object_size, - repo_pubkey, - repo_secret, + &store_repo, + &store_secret, ); log_debug!("root_id: {:?}", object.id()); @@ -779,7 +870,7 @@ mod test { let root_depsref = Block::new( vec![], - Some(ObjectRef::from_id_key(id, key.clone())), + Some(CommitHeaderRef::from_id_key(id, key.clone())), data_ser.clone(), None, ); @@ -797,7 +888,7 @@ mod test { let root_one = Block::new( vec![id; 1], - Some(ObjectRef::from_id_key(id, key.clone())), + Some(CommitHeaderRef::from_id_key(id, key.clone())), one_key_ser.clone(), None, ); @@ -805,7 +896,7 @@ mod test { let root_two = Block::new( vec![id; 2], - Some(ObjectRef::from_id_key(id, key)), + Some(CommitHeaderRef::from_id_key(id, key)), two_keys_ser.clone(), None, ); diff --git a/p2p-repo/src/repo.rs b/p2p-repo/src/repo.rs index 6d723c9..1c7774a 100644 --- a/p2p-repo/src/repo.rs +++ b/p2p-repo/src/repo.rs @@ -35,20 +35,29 @@ impl Repository { pub struct UserInfo { /// list of permissions granted to user, with optional metadata - pub permissions: HashMap>, + pub permissions: HashMap>, + pub id: UserId, } impl UserInfo { - pub fn has_any_perm(&self, perms: &HashSet<&Permission>) -> Result<(), NgError> { - let has_perms: HashSet<&Permission> = self.permissions.keys().collect(); - if has_perms.intersection(perms).count() > 0 { - Ok(()) - } else { - Err(NgError::PermissionDenied) + pub fn has_any_perm(&self, perms: &HashSet) -> Result<(), 
NgError> { + if self.has_perm(&PermissionV0::Owner).is_ok() { + return Ok(()); } - // + let is_admin = self.has_perm(&PermissionV0::Admin).is_ok(); + //is_delegated_by_admin + let has_perms: HashSet<&PermissionV0> = self.permissions.keys().collect(); + for perm in perms { + if is_admin && perm.is_delegated_by_admin() || has_perms.contains(perm) { + return Ok(()); + } + } + // if has_perms.intersection(perms).count() > 0 { + // Ok(()) + // } else { + Err(NgError::PermissionDenied) } - pub fn has_perm(&self, perm: &Permission) -> Result<&Vec, NgError> { + pub fn has_perm(&self, perm: &PermissionV0) -> Result<&Vec, NgError> { self.permissions.get(perm).ok_or(NgError::PermissionDenied) } } @@ -58,7 +67,7 @@ pub struct Repo<'a> { /// Repo definition pub repo_def: Repository, - pub members: HashMap, + pub members: HashMap, store: Box, } @@ -66,8 +75,8 @@ impl<'a> Repo<'a> { pub fn new_with_member( id: &PubKey, - member: UserId, - perms: &[Permission], + member: &UserId, + perms: &[PermissionV0], store: Box, ) -> Self { let mut members = HashMap::new(); @@ -75,11 +84,17 @@ impl<'a> Repo<'a> { perms .iter() .map(|p| (*p, vec![])) - .collect::)>>() + .collect::)>>() .iter() .cloned(), ); - members.insert(member, UserInfo { permissions }); + members.insert( + member.into(), + UserInfo { + id: *member, + permissions, + }, + ); Self { repo_def: Repository::new(id, &vec![]), members, @@ -88,9 +103,9 @@ impl<'a> Repo<'a> { } pub fn verify_permission(&self, commit: &Commit) -> Result<(), NgError> { - let content = commit.content_v0(); + let content_author = commit.content_v0().author; let body = commit.load_body(&self.store)?; - match self.members.get(&content.author) { + match self.members.get(&content_author) { Some(info) => return info.has_any_perm(&body.required_permission()), None => {} } diff --git a/p2p-repo/src/site.rs b/p2p-repo/src/site.rs index 9cd8c20..a22575a 100644 --- a/p2p-repo/src/site.rs +++ b/p2p-repo/src/site.rs @@ -33,22 +33,30 @@ impl SiteV0 { let private_key = PrivKey::random_ed(); + /* pub key: PrivKey, + // signature with site_key + // pub sig: Sig, + /// current read capability + pub read_cap: ReadCap, + + pub write_cap: RepoWriteCapSecret, */ + let public = SiteStore { key: PrivKey::dummy(), - root_branch_def_ref: BlockRef::dummy(), - repo_secret: SymKey::random(), + read_cap: BlockRef::dummy(), + write_cap: SymKey::random(), }; let protected = SiteStore { key: PrivKey::dummy(), - root_branch_def_ref: BlockRef::dummy(), - repo_secret: SymKey::random(), + read_cap: BlockRef::dummy(), + write_cap: SymKey::random(), }; let private = SiteStore { key: PrivKey::dummy(), - root_branch_def_ref: BlockRef::dummy(), - repo_secret: SymKey::random(), + read_cap: BlockRef::dummy(), + write_cap: SymKey::random(), }; Ok(Self { diff --git a/p2p-repo/src/store.rs b/p2p-repo/src/store.rs index 6cdec57..d732392 100644 --- a/p2p-repo/src/store.rs +++ b/p2p-repo/src/store.rs @@ -42,6 +42,7 @@ pub enum StorageError { BackendError, SerializationError, AlreadyExists, + DataCorruption, } impl core::fmt::Display for StorageError { @@ -112,7 +113,15 @@ impl HashMapRepoStore { impl RepoStore for HashMapRepoStore { fn get(&self, id: &BlockId) -> Result { match self.blocks.read().unwrap().get(id) { - Some(block) => Ok(block.clone()), + Some(block) => { + let mut b = block.clone(); + let i = b.get_and_save_id(); + if *id == i { + Ok(b) + } else { + Err(StorageError::DataCorruption) + } + } None => Err(StorageError::NotFound), } } diff --git a/p2p-repo/src/types.rs
b/p2p-repo/src/types.rs index 9687d04..e1aef73 100644 --- a/p2p-repo/src/types.rs +++ b/p2p-repo/src/types.rs @@ -19,6 +19,7 @@ use crate::utils::{ ed_privkey_to_ed_pubkey, from_ed_privkey_to_dh_privkey, random_key, }; use core::fmt; +use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; use serde_bare::to_vec; use std::collections::{HashMap, HashSet}; @@ -46,6 +47,26 @@ impl fmt::Display for Digest { } } +impl From<&Vec> for Digest { + fn from(ser: &Vec) -> Self { + let hash = blake3::hash(ser.as_slice()); + Digest::Blake3Digest32(hash.as_bytes().clone()) + } +} + +impl From<&[u8; 32]> for Digest { + fn from(ser: &[u8; 32]) -> Self { + let hash = blake3::hash(ser); + Digest::Blake3Digest32(hash.as_bytes().clone()) + } +} + +impl From<&PubKey> for Digest { + fn from(key: &PubKey) -> Self { + key.slice().into() + } +} + /// ChaCha20 symmetric key pub type ChaCha20Key = [u8; 32]; @@ -340,7 +361,28 @@ pub type ObjectKey = BlockKey; /// Object reference pub type ObjectRef = BlockRef; -/// IDENTITY, SITE, STORE, OVERLAY common types +/// Read capability (for a commit, branch, whole repo, or store) +/// For a store: A ReadCap to the root repo of the store +/// For a repo: A reference to the latest RootBranch definition commit +/// For a branch: A reference to the latest Branch definition commit +/// For a commit or object, the ObjectRef is itself the read capability +pub type ReadCap = ObjectRef; + +/// Read capability secret (for a commit, branch, whole repo, or store) +/// it is already included in the ReadCap (it is the key part of the reference) +pub type ReadCapSecret = ObjectKey; + +/// Write capability secret (for a whole repo) +pub type RepoWriteCapSecret = SymKey; + +/// Write capability secret (for a branch's topic) +pub type BranchWriteCapSecret = PrivKey; + +//TODO: PermaReadCap (involves sending an InboxPost to some verifiers) + +// +// IDENTITY, SITE, STORE, OVERLAY common types +// /// List of Identity types #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] @@ -353,29 +395,54 @@ pub enum Identity { IndividualPublicStore(PubKey), IndividualProtectedStore(PubKey), IndividualPrivateStore(PubKey), + //Document(RepoId), } /// List of Store Overlay types #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] -pub enum StoreOverlay { +pub enum StoreOverlayV0 { PublicStore(PubKey), ProtectedStore(PubKey), - PrivateStore(PubKey), + //PrivateStore(PubKey), Group(PubKey), Dialog(Digest), - //Document(RepoId), +} + +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub enum StoreOverlay { + V0(StoreOverlayV0), + Own(BranchId), // The repo is a store, so the overlay can be derived from its own ID. In this case, the branchId of the `overlay` branch is entered here. 
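// [editor's note] The From impls added for Digest near the top of types.rs all reduce
// to one operation: BLAKE3 over the input bytes, wrapped in Digest::Blake3Digest32.
// A sketch of that operation with the blake3 crate (already a dependency of p2p-repo);
// the sample input is arbitrary.

fn digest_bytes(ser: &[u8]) -> [u8; 32] {
    *blake3::hash(ser).as_bytes()
}

fn main() {
    // e.g. From<&PubKey> hashes the key's 32-byte slice the same way
    let d = digest_bytes(&[7u8; 32]);
    assert_eq!(d.len(), 32); // exactly the payload of Digest::Blake3Digest32
}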
} /// List of Store Root Repo types #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] -pub enum StoreRootRepo { +pub enum StoreRepoV0 { PublicStore(RepoId), ProtectedStore(RepoId), - PrivateStore(RepoId), + //PrivateStore(RepoId), Group(RepoId), Dialog(RepoId), } +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub enum StoreRepo { + V0(StoreRepoV0), +} + +impl StoreRepo { + pub fn repo_id(&self) -> &RepoId { + match self { + Self::V0(v0) => match v0 { + StoreRepoV0::PublicStore(id) + | StoreRepoV0::ProtectedStore(id) + //| StoreRepoV0::PrivateStore(id) + | StoreRepoV0::Group(id) + | StoreRepoV0::Dialog(id) => id, + }, + } + } +} + /// Site type #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub enum SiteType { @@ -390,9 +457,10 @@ pub struct SiteStore { pub key: PrivKey, // signature with site_key // pub sig: Sig, - pub root_branch_def_ref: ObjectRef, + /// current read capability + pub read_cap: ReadCap, - pub repo_secret: SymKey, + pub write_cap: RepoWriteCapSecret, } /// Site Store type #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)] pub enum SiteStoreType { @@ -432,9 +500,9 @@ pub struct ReducedSiteV0 { pub private_site_key: PrivKey, - pub private_site_root_branch_def_ref: ObjectRef, + pub private_site_read_cap: ReadCap, - pub private_site_repo_secret: SymKey, + pub private_site_write_cap: RepoWriteCapSecret, pub core: PubKey, @@ -458,12 +526,21 @@ pub enum ChunkContentV0 { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct CommitHeaderV0 { + /// optional Commit Header ID + #[serde(skip)] + pub id: Option, + /// Other objects this commit strongly depends on (ex: ADD for a REMOVE, refs for an nrefs) pub deps: Vec, /// dependency that is removed after this commit. used for reverts pub ndeps: Vec, + /// tells brokers that this is a hard snapshot and that all the ACKs and full causal past should be treated as ndeps (their body removed) + /// brokers will only perform the deletion of bodies after this commit has been ACKed by at least one subsequent commit + /// but if the next commit is a nack, the deletion is prevented.
+ pub compact: bool, + /// current valid commits in head pub acks: Vec, @@ -499,11 +576,53 @@ impl CommitHeader { CommitHeader::V0(v0) => v0.acks.clone(), } } + pub fn id(&self) -> &Option { + match self { + CommitHeader::V0(v0) => &v0.id, + } + } + + pub fn set_id(&mut self, id: Digest) { + match self { + CommitHeader::V0(v0) => v0.id = Some(id), + } + } + + pub fn set_compact(&mut self) { + match self { + CommitHeader::V0(v0) => v0.set_compact(), + } + } + + pub fn new_with( + deps: Vec, + ndeps: Vec, + acks: Vec, + nacks: Vec, + refs: Vec, + nrefs: Vec, + ) -> (Option, Option) { + let res = CommitHeaderV0::new_with(deps, ndeps, acks, nacks, refs, nrefs); + ( + res.0.map(|h| CommitHeader::V0(h)), + res.1.map(|h| CommitHeaderKeys::V0(h)), + ) + } + + pub fn new_with_deps(deps: Vec) -> Option { + CommitHeaderV0::new_with_deps(deps).map(|ch| CommitHeader::V0(ch)) + } + + pub fn new_with_deps_and_acks(deps: Vec, acks: Vec) -> Option { + CommitHeaderV0::new_with_deps_and_acks(deps, acks).map(|ch| CommitHeader::V0(ch)) + } } impl CommitHeaderV0 { fn new_empty() -> Self { Self { + id: None, + compact: false, deps: vec![], ndeps: vec![], acks: vec![], @@ -513,6 +632,10 @@ impl CommitHeaderV0 { } } + pub fn set_compact(&mut self) { + self.compact = true; + } + pub fn new_with( deps: Vec, ndeps: Vec, @@ -538,7 +661,6 @@ impl CommitHeaderV0 { let mut inrefs: Vec = vec![]; let mut kdeps: Vec = vec![]; - let mut kndeps: Vec = vec![]; let mut kacks: Vec = vec![]; let mut knacks: Vec = vec![]; for d in deps { @@ -547,7 +669,6 @@ impl CommitHeaderV0 { } for d in ndeps { indeps.push(d.id); - kndeps.push(d.key); } for d in acks { iacks.push(d.id); @@ -565,6 +686,8 @@ impl CommitHeaderV0 { } ( Some(Self { + id: None, + compact: false, deps: ideps, ndeps: indeps, acks: iacks, @@ -574,7 +697,6 @@ impl CommitHeaderV0 { }), Some(CommitHeaderKeysV0 { deps: kdeps, - ndeps: kndeps, acks: kacks, nacks: knacks, refs, @@ -609,9 +731,8 @@ pub struct CommitHeaderKeysV0 { /// Other objects this commit strongly depends on (ex: ADD for a REMOVE, refs for an nrefs) pub deps: Vec, - /// dependencies that are removed after this commit. used for reverts - pub ndeps: Vec, - + // ndeps keys are not included because we don't need the keys to access the commits we will not need anymore + // the keys are in the deps of their respective subsequent commits in the DAG anyway /// current valid commits in head pub acks: Vec, @@ -622,7 +743,7 @@ pub struct CommitHeaderKeysV0 { /// even if the CommitHeader is omitted, we want the Files to be openable. pub refs: Vec, // nrefs keys are not included because we don't need the keys to access the files we will not need anymore - // the keys are in the deps anyway + // the keys are in the deps of the respective commits that added them anyway } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] @@ -630,13 +751,34 @@ pub enum CommitHeaderKeys { V0(CommitHeaderKeysV0), } +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] +pub enum CommitHeaderObject { + Id(ObjectId), + EncryptedContent(Vec), + None, +} + +pub struct CommitHeaderRef { + pub obj: CommitHeaderObject, + pub key: ObjectKey, +} + +impl CommitHeaderRef { + pub fn from_id_key(id: BlockId, key: ObjectKey) -> Self { + CommitHeaderRef { + obj: CommitHeaderObject::Id(id), + key, + } + } +} + #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct BlockContentV0 { - /// Reference (actually, only its ID) to a CommitHeader of the root Block of a commit that contains references to other objects (e.g. 
Commit deps & acks) + /// Reference (actually, only its ID or an embedded block if the size is small enough) + /// to a CommitHeader of the root Block of a commit that contains references to other objects (e.g. Commit deps & acks) /// Only set if the block is a commit (and it is the root block of the Object). - /// It is an easy way to know if the Block is a commit. - /// And ObjectRef to an Object containing a CommitHeaderV0 - pub commit_header_id: Option, + /// It is an easy way to know if the Block is a commit (but be careful because some root commits can be without a header). + pub commit_header: CommitHeaderObject, /// Block IDs for child nodes in the Merkle tree, can be empty if ObjectContent fits in one block pub children: Vec, @@ -645,7 +787,8 @@ pub struct BlockContentV0 { /// /// Encrypted using convergent encryption with ChaCha20: /// - convergence_key: BLAKE3 derive_key ("NextGraph Data BLAKE3 key", - /// repo_pubkey + repo_secret) + /// StoreRepo + store's repo ReadCapSecret ) + /// // basically similar to the InnerOverlayId but not hashed, so that brokers cannot do a preimage attack /// - key: BLAKE3 keyed hash (convergence_key, plain_chunk_content) /// - nonce: 0 #[serde(with = "serde_bytes")] @@ -658,6 +801,14 @@ pub enum BlockContent { V0(BlockContentV0), } +impl BlockContent { + pub fn commit_header_obj(&self) -> &CommitHeaderObject { + match self { + Self::V0(v0) => &v0.commit_header, + } + } +} + /// Immutable block with encrypted content /// /// `ObjectContent` is chunked and stored as `Block`s in a Merkle tree. @@ -674,7 +825,8 @@ pub struct BlockV0 { /// Header // #[serde(skip)] - // pub header: Option, + // TODO + // pub header: Option, /// Key needed to open the CommitHeader. can be omitted if the Commit is shared without its ancestors, /// or if the block is not a root block of commit, or that commit is a root commit (first in branch) @@ -693,10 +845,10 @@ pub enum Block { /// Repository definition /// -/// First commit published in root branch, where: -/// - branch_pubkey: repo_pubkey -/// - branch_secret: BLAKE3 derive_key ("NextGraph Root Branch secret", -/// repo_pubkey + repo_secret) +/// First commit published in root branch, signed by repository key +/// For the Root repo of a store (overlay), the convergence_key should be derived from: +/// ("NextGraph Store Root Repo BLAKE3 convergence key", +/// RepoId + RepoWriteCapSecret) #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct RepositoryV0 { /// Repo public key ID pub id: PubKey, @@ -710,7 +862,8 @@ pub struct RepositoryV0 { pub creator: Option, // TODO: discrete doc type - // TODO: order (partial order, total order, partial sign all commits, fsm, smart contract ) + // TODO: order (store, partial order, total order, partial sign all commits, fsm, smart contract ) + // TODO: immutable conditions (allow_change_quorum, min_quorum, allow_inherit_perms, etc...) /// Immutable App-specific metadata #[serde(with = "serde_bytes")] pub metadata: Vec, @@ -725,39 +878,55 @@ pub enum Repository { V0(RepositoryV0), } /// Root Branch definition V0 /// /// Second commit in the root branch, signed by repository key -/// is used also to update the root branch definition when users are removed -/// DEPS: Reference to the repository commit, to get the verification_program and repo_id +/// It is also used to update the root branch definition when users are removed, quorum(s) are changed, or the repo is moved to another store.
+/// In this case, it is signed by its author, and requires an additional group signature by the total_order_quorum or by the owners_quorum. +/// DEPS: Reference to the previous root branch definition commit, if it is an update #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct RootBranchV0 { /// Branch public key ID, equal to the repo_id pub id: PubKey, - // Reference to the repository commit, to get the verification_program and repo_id - //pub repo_ref: ObjectRef, - // this can be omitted as the ref to repo is in deps. + /// Reference to the repository commit, to get the verification_program and other immutable details + pub repo: ObjectRef, + /// Store ID the repo belongs to /// the identity is checked by verifiers (check members, check overlay is matching) pub store: StoreOverlay, - /// Pub/sub topic ID for publishing events - pub topic: PubKey, + /// signature of repoId with MODIFY_STORE_KEY privkey of store + /// in order to verify that the store recognizes this repo as part of itself. + /// only if not a store root repo itself + pub store_sig: Option, + + /// Pub/sub topic ID for publishing events about the root branch + pub topic: TopicId, - /// topic private key, encrypted with the repo_secret, topic_id, branch_id + /// topic private key (a BranchWriteCapSecret), encrypted with a key derived as follow + /// BLAKE3 derive_key ("NextGraph Branch WriteCap Secret BLAKE3 key", + /// RepoWriteCapSecret, TopicId, BranchId ) + /// so that only editors of the repo can decrypt the privkey #[serde(with = "serde_bytes")] pub topic_privkey: Vec, - /// Permissions are inherited from Store Root Repo. Optional - /// (only if this repo is not a root repo itself). + /// if set, permissions are inherited from Store Repo. + /// Optional is a store_read_cap + /// (only set if this repo is not the store repo itself) /// check that it matches the self.store - pub inherit_perms: Option, + /// can only be committed by an owner + /// owners are not inherited from store + // TODO: ReadCap or PermaReadCap. If it is a ReadCap, a new RootBranch commit should be published (RefreshReadCap) every time the store read cap changes. + pub inherit_perms_users_and_quorum_from_store: Option, + + /// Quorum definition ObjectRef + /// TODO: ObjectKey should be encrypted with SIGNER_KEY ? + /// TODO: chain of trust + pub quorum: Option, /// BEC periodic reconciliation interval. zero deactivates it pub reconciliation_interval: RelTime, - /// signature of repoId with MODIFY_STORE_KEY privkey of store - /// in order to verify that the store recognizes this repo as part of itself. - /// only if not a store root repo itself - pub store_sig: Option, + // list of owners. all of them are required to sign any RootBranch that modifies the list of owners or the inherit_perms_users_and_quorum_from_store field. + pub owners: Vec, /// Mutable App-specific metadata #[serde(with = "serde_bytes")] @@ -770,18 +939,26 @@ pub enum RootBranch { V0(RootBranchV0), } -/// Quorum change V0 +impl RootBranch { + pub fn owners(&self) -> &Vec { + match self { + Self::V0(v0) => &v0.owners, + } + } +} + +/// Quorum definition V0 /// -/// Sent after RemoveUser, AddUser +/// Changed when the signers need to be updated. Signers are not necessarily editors of the repo, and they do not need to be members either, as they will be notified of RefreshReadCaps anyway. 
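// [editor's note] Sketch of the key derivation documented for `topic_privkey` above,
// using blake3::derive_key with the context string quoted from the patch comment. The
// concatenation order of RepoWriteCapSecret, TopicId and BranchId as key material is
// an assumption made for illustration only.

fn branch_writecap_encryption_key(
    repo_write_cap_secret: &[u8; 32],
    topic_id: &[u8; 32],
    branch_id: &[u8; 32],
) -> [u8; 32] {
    // assumed ordering: secret, then topic, then branch
    let mut material = Vec::with_capacity(96);
    material.extend_from_slice(repo_write_cap_secret);
    material.extend_from_slice(topic_id);
    material.extend_from_slice(branch_id);
    blake3::derive_key("NextGraph Branch WriteCap Secret BLAKE3 key", &material)
}

fn main() {
    let k = branch_writecap_encryption_key(&[0u8; 32], &[1u8; 32], &[2u8; 32]);
    assert_eq!(k.len(), 32); // a ChaCha20-sized key, used to encrypt topic_privkey
}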
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct QuorumV0 { - /// Number of signatures required for an partial order commit to be valid + /// Number of signatures required for a partial order commit to be valid (threshold+1) pub partial_order_quorum: u32, /// List of the users who can sign for partial order pub partial_order_users: Vec, - /// Number of signatures required for a total order commit to be valid + /// Number of signatures required for a total order commit to be valid (threshold+1) pub total_order_quorum: u32, /// List of the users who can sign for total order @@ -792,7 +969,8 @@ pub struct QuorumV0 { pub metadata: Vec, } -/// Quorum change +/// Quorum definition, is part of the RootBranch commit +/// TODO: can it be sent in the root branch without being part of a RootBranch ? #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub enum Quorum { V0(QuorumV0), @@ -814,16 +992,22 @@ pub struct BranchV0 { /// Reference to the repository commit pub repo: ObjectRef, - /// object ID of the current root_branch commit, in order to keep in sync the branch with root_branch - pub root_branch_def_id: ObjectId, + /// object ID of the current root_branch commit (ReadCap), in order to keep in sync this branch with root_branch + /// The key is not provided as external readers should not be able to access the root branch definition. + /// it is only used by verifiers (who have the key already) + pub root_branch_readcap_id: ObjectId, /// Pub/sub topic for publishing events pub topic: PubKey, - /// topic private key, encrypted with the repo_secret, branch_id, topic_id + /// topic private key (a BranchWriteCapSecret), encrypted with a key derived as follow + /// BLAKE3 derive_key ("NextGraph Branch WriteCap Secret BLAKE3 key", + /// RepoWriteCapSecret, TopicId, BranchId ) + /// so that only editors of the repo can decrypt the privkey #[serde(with = "serde_bytes")] pub topic_privkey: Vec, + // TODO: chain of trust /// App-specific metadata #[serde(with = "serde_bytes")] pub metadata: Vec, @@ -836,16 +1020,16 @@ pub enum Branch { } /// Add a branch to the repository -/// DEPS: if update branch: previous AddBranch or UpdateBranch commit +/// DEPS: if update branch: previous AddBranch commit of the same branchId #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct AddBranchV0 { /// the new topic_id (will be needed immediately by future readers - /// in order to subscribe to the pub/sub) + /// in order to subscribe to the pub/sub). should be identical to the one in the Branch definition topic_id: TopicId, // the new branch definition commit // (we need the ObjectKey in order to open the pub/sub Event) - branch_def: ObjectRef, + branch_read_cap: ReadCap, } /// Add a branch to the repository @@ -858,7 +1042,7 @@ pub type RemoveBranchV0 = (); /// Remove a branch from the repository /// -/// DEPS: should point to the previous AddBranch/UpdateBranch, can be several in case of concurrent AddBranch. ORset logiv) +/// DEPS: should point to the previous AddBranch. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub enum RemoveBranch { V0(RemoveBranchV0), @@ -882,14 +1066,13 @@ pub enum AddMember { } /// Remove member from a repo +/// An owner cannot be removed +/// The overlay should be refreshed if user was malicious, after the user is removed from last repo. See REFRESH_READ_CAP on store repo. 
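// [editor's note] Toy illustration of the "(threshold+1)" reading in the QuorumV0
// comments above: a commit becomes valid once the collected signature count reaches
// the quorum value. The names are hypothetical; only the counting rule is shown.

fn quorum_met(collected_signatures: usize, quorum: u32) -> bool {
    collected_signatures >= quorum as usize
}

fn main() {
    let total_order_quorum = 3u32; // i.e. threshold t = 2, so t+1 = 3 signers required
    assert!(!quorum_met(2, total_order_quorum));
    assert!(quorum_met(3, total_order_quorum));
}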
 /// Remove member from a repo
+/// An owner cannot be removed
+/// The overlay should be refreshed if the user was malicious, after the user has been removed from the last repo. See REFRESH_READ_CAP on the store repo.
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct RemoveMemberV0 {
     /// Member to remove
     pub member: UserId,
 
-    /// Should the overlay been refreshed. This is used on the last repo, when User is removed from all the repos of the store, because user was malicious.
-    pub refresh_overlay: bool,
-
     /// should this user be banned and prevented from being invited again by anybody else
     pub banned: bool,
@@ -906,23 +1089,41 @@ pub enum RemoveMember {
 
 /// Permissions
 #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
-pub enum Permission {
+pub enum PermissionV0 {
     Create, // Used internally by the creator at creation time. Not part of the permission set that can be added and removed
-    MoveToStore, // moves the repo to another store
-    AddBranch,
-    RemoveBranch,
-    ChangeName,
-    AddMember,
-    RemoveMember,
-    ChangeQuorum,
-    ChangePermission,
+    Owner,  // used internally for owners
+
+    //
+    // permissions delegated by owners and admins (all admins inherit them)
+    //
+    AddReadMember, // adds a member to the repo (AddMember). without any additional perm, the user is a reader. always behind SyncSignature
+    RemoveMember, // if the user has any specific perm, the RemoveWritePermission, RefreshWriteCap and/or Admin permission is needed. always behind SyncSignature
+    AddWritePermission, // can send an AddPermission that adds 3 perms to another user: WriteAsync, WriteSync, and RefreshWriteCap
+    WriteAsync, // can send AsyncTransaction, AddFile, RemoveFile, Snapshot, optionally with AsyncSignature
+    WriteSync,  // can send SyncTransaction, AddFile, RemoveFile, always behind SyncSignature
+    Compact,    // can send Compact, always behind SyncSignature
+    RemoveWritePermission, // can send a RemovePermission that removes the WriteAsync, WriteSync or RefreshWriteCap permissions from a user. RefreshWriteCap will probably be needed by the user who does the RemovePermission
+
+    AddBranch,    // can send AddBranch and Branch commits, always behind SyncSignature
+    RemoveBranch, // can send RemoveBranch, always behind SyncSignature
+    ChangeName,   // can send AddName and RemoveName
+
+    RefreshReadCap, // can send RefreshReadCap followed by UpdateRootBranch and/or UpdateBranch commits, with or without renewed topicIds. Always behind SyncSignature
+    RefreshWriteCap, // can send RefreshWriteCap followed by UpdateRootBranch and associated UpdateBranch commits on all branches, with renewed topicIds and RepoWriteCapSecret. Always behind SyncSignature
+
+    //
+    // permissions delegated by owners:
+    //
+    ChangeQuorum, // can add and remove Signers, change the quorum thresholds for total order and partial order. implies the RefreshReadCap perm (without changing topicIds). Always behind SyncSignature
+    Admin, // can administer the repo: assigns perms to other users with AddPermission and RemovePermission. RemovePermission always behind SyncSignature
     ChangeMainBranch,
-    Transaction,
-    Snapshot,
-    Chat,
-    Inbox,
-    Share,
-    UpdateStore, // only for store root repo (add doc, remove doc)
+
+    // other permissions. TODO: specify them in more detail
+    Chat,       // can chat
+    Inbox,      // can read inbox
+    PermaShare, // can create and answer to PermaReadCap, PermaWriteCap, PermaLink
+    UpdateStore, // only for store root repo (add doc, remove doc) to the store special branch
+    RefreshOverlay, // Equivalent to RefreshReadCap for the overlay special branch.
 }
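The comments on the PermissionV0 variants repeatedly note which permissions are exercised "always behind SyncSignature". A hypothetical helper summarizing that rule, derived only from those comments (it is not part of this patch), could be:

    // Sketch only: permissions whose commits must always be covered by a
    // SyncSignature, per the comments on the PermissionV0 variants above.
    fn always_behind_sync_signature(perm: &PermissionV0) -> bool {
        use PermissionV0::*;
        matches!(
            perm,
            AddReadMember | RemoveMember | WriteSync | Compact | AddBranch
                | RemoveBranch | ChangeQuorum | RefreshReadCap | RefreshWriteCap
        )
    }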
 
 /// Add permission to a member in a repo
@@ -932,12 +1133,13 @@ pub struct AddPermissionV0 {
     pub member: UserId,
 
     /// Permission given to user
-    pub permission: Permission,
+    pub permission: PermissionV0,
 
     /// Metadata
     /// (role, app level permissions, cryptographic material, etc)
     /// Can be some COMMON KEY privkey encrypted with the user pubkey
     /// If a PROOF for the common key is needed, should be sent here too
+    /// COMMON KEYS are: SHARE, INBOX,
     #[serde(with = "serde_bytes")]
     pub metadata: Vec<u8>,
 }
@@ -947,6 +1149,14 @@ pub enum AddPermission {
     V0(AddPermissionV0),
 }
 
+impl AddPermission {
+    pub fn permission_v0(&self) -> &PermissionV0 {
+        match self {
+            Self::V0(v0) => &v0.permission,
+        }
+    }
+}
+
 /// Remove permission from a user in a repo
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct RemovePermissionV0 {
@@ -954,7 +1164,7 @@ pub struct RemovePermissionV0 {
     pub member: UserId,
 
     /// Permission removed from user
-    pub permission: Permission,
+    pub permission: PermissionV0,
 
     /// Metadata
     /// (reason, new cryptographic materials...)
@@ -970,6 +1180,14 @@ pub enum RemovePermission {
     V0(RemovePermissionV0),
 }
 
+impl RemovePermission {
+    pub fn permission_v0(&self) -> &PermissionV0 {
+        match self {
+            Self::V0(v0) => &v0.permission,
+        }
+    }
+}
+
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub enum RepoNamedItemV0 {
     Branch(BranchId),
@@ -977,6 +1195,11 @@ pub enum RepoNamedItemV0 {
     File(ObjectId),
 }
 
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum RepoNamedItem {
+    V0(RepoNamedItemV0),
+}
+
 /// Add a new name in the repo that can point to a branch, a commit or a file
 /// Or change the value of a name
 /// DEPS: if it is a change of value: all the previous AddName commits seen for this name
@@ -987,7 +1210,7 @@ pub struct AddNameV0 {
     pub name: String,
 
     /// A branch, commit or file
-    pub item: RepoNamedItemV0,
+    pub item: RepoNamedItem,
 
     /// Metadata
     #[serde(with = "serde_bytes")]
@@ -1019,18 +1242,11 @@ pub enum ChangeMainBranch {
 /// DEPS: all the AddName commits seen for this name
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct RemoveNameV0 {
-    /// Member to remove
+    /// name to remove
     /// names `main`, `chat`, `store` are reserved
     pub name: String,
 
-    /// Permission removed from user
-    pub permission: Permission,
-
     /// Metadata
-    /// (reason, new cryptographic materials...)
-    /// If the permission was linked to a COMMON KEY, a new privkey should be generated
-    /// and sent to all users that still have this permission, encrypted with their respective pubkey
-    /// If a PROOF for the common key is needed, should be sent here too
     #[serde(with = "serde_bytes")]
     pub metadata: Vec<u8>,
 }
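The permission_v0() accessors above make version-agnostic checks straightforward. For instance, a hypothetical caller (not part of this patch) deciding whether a RemovePermission commit revokes write access, i.e. one of the three perms granted via AddWritePermission, could write:

    // Sketch only: does this RemovePermission revoke a write-related perm?
    fn revokes_write_access(rp: &RemovePermission) -> bool {
        matches!(
            rp.permission_v0(),
            PermissionV0::WriteAsync | PermissionV0::WriteSync | PermissionV0::RefreshWriteCap
        )
    }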
@@ -1086,14 +1302,8 @@ pub enum RemoveFile {
 /// ACKS contains the head the snapshot was made from
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct SnapshotV0 {
-    // FIXME: why do we need this?
-    // Branch heads the snapshot was made from
-    // pub heads: Vec,
-    /// hard snapshot will erase all the CommitBody of ancestors in the branch
-    /// the acks will be present in header, but the CommitContent.header_keys will be set to None so the access to the acks will be lost
-    /// the commit_header_key of BlockV0 can be safely shared outside of the repo, as the header_keys is empty, so the heads will not be readable anyway
-    /// If a branch is based on a hard snapshot, it cannot be merged back into the branch where the hard snapshot was made.
-    pub hard: bool,
+    /// Branch heads the snapshot was made from. Can be useful when shared outside and the commit_header_key is set to None. Otherwise it is redundant to ACKS
+    pub heads: Vec<ObjectId>,
 
     /// Snapshot data structure
     #[serde(with = "serde_bytes")]
@@ -1106,21 +1316,215 @@ pub enum Snapshot {
     V0(SnapshotV0),
 }
 
-/// Threshold Signature of a commit
-/// mandatory for UpdateRootBranch, AddMember, RemoveMember, Quorum, UpdateBranch, hard Snapshot,
-/// DEPS: the signed commit
+/// Compact: Hard Snapshot of a Branch
+///
+/// Contains a data structure
+/// computed from the commits at the specified head.
+/// ACKS contains the head the snapshot was made from
+///
+/// A hard snapshot will erase all the CommitBody of ancestors in the branch.
+/// The compact boolean should be set in the Header too.
+/// After a hard snapshot, it is recommended to refresh the read capability (to empty the topics of the keys they still hold).
+/// If a branch is based on a hard snapshot, it cannot be merged back into the branch where the hard snapshot was made.
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-pub struct ThresholdSignatureV0 {
-    // TODO: pub chain_of_trust: ,
-    /// Threshold signature
+pub struct CompactV0 {
+    /// Branch heads the snapshot was made from. Can be useful when shared outside and the commit_header_key is set to None. Otherwise it is redundant to ACKS
+    pub heads: Vec<ObjectId>,
+
+    /// Snapshot data structure
     #[serde(with = "serde_bytes")]
-    pub signature: Vec<u8>,
+    pub content: Vec<u8>,
 }
 
-/// Snapshot of a Branch
+/// Compact (hard snapshot) of a Branch
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
-pub enum ThresholdSignature {
-    V0(ThresholdSignatureV0),
+pub enum Compact {
+    V0(CompactV0),
+}
+
+/// Async Threshold Signature of a commit (V0), based on the partial order quorum
+/// Can sign Transaction, AddFile, and Snapshot, after they have been committed to the DAG.
+/// DEPS: the signed commits
+// #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+// pub struct AsyncSignatureV0 {
+//     /// An Object containing the Threshold signature
+//     pub signature: ObjectRef,
+// }
+
+/// Async Threshold Signature of a commit based on the partial order quorum
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum AsyncSignature {
+    V0(ObjectRef),
+}
+
+/// Sync Threshold Signature of one commit or a chain of commits (V0), based on the total order quorum
+/// mandatory for UpdateRootBranch, UpdateBranch, ChangeMainBranch, AddBranch, RemoveBranch, AddMember, RemoveMember, RemovePermission, Quorum, Compact, sync Transaction, RefreshReadCap, RefreshWriteCap
+/// DEPS: the last signed commit in the chain
+/// ACKS: previous head before the chain of signed commit(s). should be identical to the HEADS (marked as DEPS) of the first commit in the chain
+// #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+// pub struct SyncSignatureV0 {
+//     /// An Object containing the Threshold signature
+//     pub signature: ObjectRef,
+// }
+
+/// Sync Threshold Signature of a commit or chain of commits
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum SyncSignature {
+    V0(ObjectRef),
+}
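Both AsyncSignature and SyncSignature are just ObjectRefs to a Signature object (defined further below), whose threshold signature is checked against a PublicKeySet published in the current Certificate. A minimal verification sketch with the threshold_crypto crate, assuming the signed message is the serde_bare serialization of the SignatureContent (fetching and decrypting the Signature object, and choosing the right set, are omitted):

    use threshold_crypto::{PublicKeySet, Signature};

    // Sketch only: verify a threshold signature against the master public key
    // of the relevant PublicKeySet (partial order, total order, or owners).
    fn verify_threshold_sig(pk_set: &PublicKeySet, sig: &Signature, content_ser: &[u8]) -> bool {
        pk_set.public_key().verify(sig, content_ser)
    }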
+
+/// RefreshReadCap. Renews the ReadCap of a `transactional` branch, or the root_branch, or all transactional branches and the root_branch.
+/// Each branch forms its separate chain for that purpose.
+/// Can refresh the topic ids, or not.
+/// DEPS: current HEADS in the branch at the moment of refresh.
+/// Followed in the chain by a Branch or RootBranch commit (linked with ACK). The key used in EventV0 for the commits in the future of the RefreshReadCap is the refresh_secret.
+/// The chain can be, for example: RefreshReadCap -> RootBranch -> AddBranch
+/// or for a transactional branch: RefreshReadCap -> Branch
+/// always eventually followed at the end of each chain by a SyncSignature (each branch its own)
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum RefreshReadCapV0 {
+    /// A randomly generated secret (SymKey) used for the refresh process, encrypted for each Member, Signer and Owner of the repo (except the one that is being excluded, if any)
+    /// format to be defined (see crypto_box)
+    RefreshSecret(),
+
+    /// Or a reference to a master RefreshReadCap commit, when some transactional branches are refreshed together with the root_branch. The refresh_secret is taken from that referenced commit
+    MasterRefresh(ObjectRef),
+}
+
+/// RefreshReadCap
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum RefreshReadCap {
+    V0(RefreshReadCapV0),
+}
+
+/// RefreshWriteCap is always done on the root_branch, and always refreshes all the transactional branches' WriteCaps and TopicIDs.
+/// DEPS: current HEADS in the branch at the moment of refresh.
+/// The chain on the root_branch is: RefreshWriteCap -> RootBranch -> optional AddPermission(s) -> AddBranch
+/// and on each transactional branch: RefreshWriteCap -> Branch
+/// always eventually followed at the end of each chain by a SyncSignature (each branch its own)
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct RefreshWriteCapV0 {
+    /// A RefreshReadCapV0::RefreshSecret when on the root_branch; otherwise, on transactional branches, a RefreshReadCapV0::MasterRefresh pointing to this RefreshWriteCapV0
+    pub refresh_read_cap: RefreshReadCapV0,
+
+    /// the new RepoWriteCapSecret, encrypted for each Editor (any Member that also has at least one permission, plus all the Owners). See format of RefreshSecret
+    // TODO: format. should be encrypted
+    // None when used for a transactional branch, as we don't want to duplicate this encrypted secret in each branch.
+    pub write_secret: Option<RepoWriteCapSecret>,
+}
+
+/// RefreshWriteCap
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum RefreshWriteCap {
+    V0(RefreshWriteCapV0),
+}
+
+/// A Threshold Signature content
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct SignatureContentV0 {
+    /// list of all the "end of chain" commits, one for each branch, when doing a SyncSignature, or a list of arbitrary commits to sign, for AsyncSignature.
+    pub commits: Vec<ObjectId>,
+}
+
+/// A Signature content
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum SignatureContent {
+    V0(SignatureContentV0),
+}
+
+/// A Threshold Signature and the set used to generate it
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum ThresholdSignatureV0 {
+    PartialOrder(threshold_crypto::Signature),
+    TotalOrder(threshold_crypto::Signature),
+    Owners(threshold_crypto::Signature),
+}
+
+/// A Threshold Signature object (not a commit), containing all the information that the signers have prepared.
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct SignatureV0 {
+    /// the content that is signed
+    pub content: SignatureContent,
+
+    /// The threshold signature itself. Can come from 3 different sets
+    pub threshold_sig: ThresholdSignatureV0,
+
+    /// A reference to the Certificate that should be used to verify this signature.
+    pub certificate_ref: ObjectRef,
+}
+
+/// A Signature object, referenced in AsyncSignature or SyncSignature
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum Signature {
+    V0(SignatureV0),
+}
+
+/// Enum for the "orders" PKsets. Can be inherited from the store, in which case it is an ObjectRef pointing to the latest Certificate of the store.
+/// Or can be 2 PublicKeySets defined specially for this repo:
+/// .0 one for the total_order (the first one),
+/// .1 the other for the partial_order (the second one; optional, as some repos are forcefully totally ordered and do not have this set).
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum OrdersPublicKeySetsV0 {
+    Store(ObjectRef),
+    Repo(
+        (
+            threshold_crypto::PublicKeySet,
+            Option<threshold_crypto::PublicKeySet>,
+        ),
+    ),
+    None, // The total_order quorum is not defined (yet, or anymore). There are no signers for the total_order, and none either for the partial_order. The owners replace them.
+}
+
+/// A Certificate content, which will be signed by the previous certificate signers.
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct CertificateContentV0 {
+    /// the previous certificate in the chain of trust. Can be another Certificate or the Repository commit when we are at the root of the chain of trust.
+    pub previous: ObjectRef,
+
+    /// The Commit Id of the latest RootBranch definition (= the ReadCap ID), in order to keep in sync with the options for signing.
+    /// not used for verifying (this is why the secret is not present).
+    pub readcap_id: ObjectId,
+
+    /// PublicKey Set used by the Owners. The verifier uses this PKset if the signature was issued by the Owners.
+    pub owners_PKset: threshold_crypto::PublicKeySet,
+
+    /// the two "orders" PublicKey Sets (total_order and partial_order).
+    pub orders_PKsets: OrdersPublicKeySetsV0,
+}
+
+/// A Signature of a Certificate, and the threshold set or public key used to generate it
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum CertificateSignatureV0 {
+    /// the root Certificate is signed with the PrivKey of the Repo
+    Repo(Sig),
+
+    /// Any other certificate in the chain of trust is signed by the total_order quorum of the previous certificate, hence establishing the chain of trust.
+    TotalOrder(threshold_crypto::Signature),
+
+    /// if the previous cert's total order PKset has a threshold value of 0 or 1 (1 or 2 signers in the quorum),
+    /// then it is allowed that the next certificate (this one) will be signed by the owners PKset instead.
+    /// This is for a simple reason: if a user is removed from the list of signers in the total_order quorum,
+    /// then in those 2 cases, the excluded signer will probably not cooperate with their exclusion, and will not sign the new certificate.
+    /// To avoid deadlocks, we allow the owners to step in and sign the new cert instead.
+    /// The Owners are also used when there is no quorum/signer defined (OrdersPublicKeySetsV0::None).
+    Owners(threshold_crypto::Signature),
+
+    /// in case the new certificate being signed is an update on the store certificate (OrdersPublicKeySetsV0::Store(ObjectRef) has changed from the previous cert),
+    /// then the signature is in that new store certificate, and not here. Nothing else should have changed in the CertificateContent, and the validity of the new store cert has to be checked.
+    Store,
+}
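Putting CertificateContentV0 and CertificateSignatureV0 together, chain verification picks the key set from the previous certificate according to the signature variant. A hedged sketch follows; the Repo ed25519 check and the Store case are stubbed out, and content_ser is assumed to be the serde_bare bytes of the new CertificateContentV0:

    // Sketch only: checks the threshold part of the chain of trust.
    // `prev` is the already-verified previous certificate content.
    fn cert_sig_valid(
        prev: &CertificateContentV0,
        content_ser: &[u8],
        sig: &CertificateSignatureV0,
    ) -> bool {
        match sig {
            // Root of the chain: verified against the repo's ed25519 pubkey (not shown here).
            CertificateSignatureV0::Repo(_sig) => false,
            CertificateSignatureV0::TotalOrder(ts) => match &prev.orders_PKsets {
                OrdersPublicKeySetsV0::Repo((total_order_set, _)) => {
                    total_order_set.public_key().verify(ts, content_ser)
                }
                // Store-inherited sets must be resolved via the store certificate first.
                _ => false,
            },
            CertificateSignatureV0::Owners(ts) => {
                prev.owners_PKset.public_key().verify(ts, content_ser)
            }
            // The signature lives in the new store certificate; it must be checked there.
            CertificateSignatureV0::Store => false,
        }
    }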
+
+/// A Certificate object (not a commit), containing all the information needed to verify a signature.
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct CertificateV0 {
+    /// content of the certificate, which is signed below by the previous certificate signers.
+    pub content: CertificateContentV0,
+
+    /// signature over the content.
+    pub sig: CertificateSignatureV0,
+}
+
+/// A certificate object
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum Certificate {
+    V0(CertificateV0),
 }
 
 /// Commit body V0
@@ -1129,36 +1533,41 @@ pub enum CommitBodyV0 {
     //
     // for root branch:
     //
-    Repository(RepositoryV0), // singleton and should be first in root_branch
-    RootBranch(RootBranchV0), // singleton and should be second in root_branch
-    UpdateRootBranch(RootBranchV0), // total order enforced with total_order_quorum
-    AddMember(AddMemberV0),   // total order enforced with total_order_quorum
-    RemoveMember(RemoveMemberV0), // total order enforced with total_order_quorum
-    Quorum(QuorumV0),         // total order enforced with total_order_quorum
-    AddPermission(AddPermissionV0),
-    RemovePermission(RemovePermissionV0),
-    AddBranch(AddBranchV0),
-    ChangeMainBranch(ChangeMainBranchV0),
-    RemoveBranch(RemoveBranchV0),
-    AddName(AddNameV0),
-    RemoveName(RemoveNameV0),
+    Repository(Repository), // singleton and should be first in root_branch
+    RootBranch(RootBranch), // singleton and should be second in root_branch
+    UpdateRootBranch(RootBranch), // total order enforced with total_order_quorum
+    AddMember(AddMember),   // total order enforced with total_order_quorum
+    RemoveMember(RemoveMember), // total order enforced with total_order_quorum
+    AddPermission(AddPermission),
+    RemovePermission(RemovePermission),
+    AddBranch(AddBranch),
+    ChangeMainBranch(ChangeMainBranch),
+    RemoveBranch(RemoveBranch),
+    AddName(AddName),
+    RemoveName(RemoveName),
+    // TODO? Quorum(Quorum), // changes the quorum without changing the RootBranch
 
     //
-    // For regular branches:
+    // For transactional branches:
     //
-    Branch(BranchV0),     // singleton and should be first in branch
-    UpdateBranch(BranchV0), // total order enforced with total_order_quorum
-    Snapshot(SnapshotV0), // if hard snapshot, total order enforced with total_order_quorum
-    Transaction(TransactionV0),
-    AddFile(AddFileV0),
-    RemoveFile(RemoveFileV0),
-    //Merge(MergeV0),
-    //Revert(RevertV0), // only possible on partial order commit
+    Branch(Branch),                // singleton and should be first in branch
+    UpdateBranch(Branch),          // total order enforced with total_order_quorum
+    Snapshot(Snapshot),            // a soft snapshot
+    AsyncTransaction(Transaction), // partial_order
+    SyncTransaction(Transaction),  // total_order
+    AddFile(AddFile),
+    RemoveFile(RemoveFile),
+    Compact(Compact), // a hard snapshot. total order enforced with total_order_quorum
+    //Merge(Merge),
+    //Revert(Revert), // only possible on a partial order commit
+    AsyncSignature(AsyncSignature),
 
     //
     // For both
     //
-    ThresholdSignature(ThresholdSignatureV0),
+    RefreshReadCap(RefreshReadCap),
+    RefreshWriteCap(RefreshWriteCap),
+    SyncSignature(SyncSignature),
 }
 
 /// Commit body
@@ -1167,91 +1576,22 @@ pub enum CommitBody {
     V0(CommitBodyV0),
 }
 
-impl CommitBody {
-    pub fn must_be_root_commit_in_branch(&self) -> bool {
-        match self {
-            Self::V0(v0) => match v0 {
-                CommitBodyV0::Repository(_) => true,
-                CommitBodyV0::Branch(_) => true,
-                _ => false,
-            },
-        }
-    }
-    pub fn total_order_required(&self) -> bool {
-        match self {
-            Self::V0(v0) => match v0 {
-                CommitBodyV0::UpdateRootBranch(_) => true,
-                CommitBodyV0::AddMember(_) => true,
-                CommitBodyV0::RemoveMember(_) => true,
-                CommitBodyV0::Quorum(_) => true,
-                CommitBodyV0::UpdateBranch(_) => true,
-                CommitBodyV0::Snapshot(s) => s.hard,
-                _ => false,
-            },
-        }
-    }
-    pub fn required_permission(&self) -> HashSet<&Permission> {
-        let res: &[Permission];
-        res = match self {
-            Self::V0(v0) => match v0 {
-                CommitBodyV0::Repository(_) => &[Permission::Create],
-                CommitBodyV0::RootBranch(_) => &[Permission::Create],
-                CommitBodyV0::UpdateRootBranch(_) => {
-                    &[Permission::RemoveMember, Permission::MoveToStore]
-                }
-                CommitBodyV0::AddMember(_) => &[Permission::Create, Permission::AddMember],
-                CommitBodyV0::RemoveMember(_) => &[Permission::RemoveMember],
-                CommitBodyV0::Quorum(_) => &[
-                    Permission::Create,
-                    Permission::AddMember,
-                    Permission::RemoveMember,
-                    Permission::ChangeQuorum,
-                ],
-                CommitBodyV0::AddPermission(_) => {
-                    &[Permission::Create, Permission::ChangePermission]
-                }
-                CommitBodyV0::RemovePermission(_) => &[Permission::ChangePermission],
-                CommitBodyV0::AddBranch(_) => &[Permission::Create, Permission::AddBranch],
-                CommitBodyV0::RemoveBranch(_) => &[Permission::RemoveBranch],
-                CommitBodyV0::UpdateBranch(_) => {
-                    &[Permission::RemoveMember, Permission::MoveToStore]
-                }
-                CommitBodyV0::AddName(_) => &[Permission::AddBranch, Permission::ChangeName],
-                CommitBodyV0::RemoveName(_) => &[Permission::ChangeName, Permission::RemoveBranch],
-                CommitBodyV0::Branch(_) => &[Permission::Create, Permission::AddBranch],
-                CommitBodyV0::ChangeMainBranch(_) => {
-                    &[Permission::Create, Permission::ChangeMainBranch]
-                }
-                CommitBodyV0::Snapshot(_) => &[Permission::Snapshot],
-                CommitBodyV0::Transaction(_) => &[Permission::Transaction],
-                CommitBodyV0::AddFile(_) => &[Permission::Transaction],
-                CommitBodyV0::RemoveFile(_) => &[Permission::Transaction],
-                CommitBodyV0::ThresholdSignature(_) => &[
-                    Permission::AddMember,
-                    Permission::ChangeQuorum,
-                    Permission::RemoveMember,
-                    Permission::Snapshot,
-                    Permission::MoveToStore,
-                    Permission::Transaction,
-                ],
-            },
-        };
-        HashSet::from_iter(res.iter())
-    }
-}
-
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub enum QuorumType {
     NoSigning,
     PartialOrder,
     TotalOrder,
+    Owners,
 }
 
 /// Content of a Commit
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub struct CommitContentV0 {
-    /// Commit author (a ForwardedPeerId)
-    pub author: PubKey,
+    /// Commit author (a hash of UserId)
+    /// BLAKE3 keyed hash over UserId
+    /// - key: BLAKE3 derive_key ("NextGraph UserId Hash Overlay Id CommitContentV0 BLAKE3 key", overlayId)
+    /// The hash will be different from the one in ForwardedPeerAdvertV0, so that core brokers dealing with public sites won't be able to correlate commits and editing peers (via a common author hash)
+    pub author: Digest,
 
     /// Author's commit sequence number
     pub seq: u64,
@@ -1259,8 +1599,11 @@ pub struct CommitContentV0 {
     /// BranchId the commit belongs to (not a ref, as readers do not need to access the branch definition)
     pub branch: BranchId,
 
+    /// optional list of dependencies on some commits in the root branch that contain the write permission needed for this commit
+    pub perms: Vec<ObjectId>,
+
     /// Keys to be able to open all the references (deps, acks, refs, etc...)
-    pub header_keys: Option<CommitHeaderKeysV0>,
+    pub header_keys: Option<CommitHeaderKeys>,
 
     /// This commit can only be accepted if signed by this quorum
     pub quorum: QuorumType,
@@ -1274,6 +1617,25 @@ pub struct CommitContentV0 {
     pub body: ObjectRef,
 }
 
+/// Content of a Commit
+#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub enum CommitContent {
+    V0(CommitContentV0),
+}
+
+impl CommitContent {
+    pub fn header_keys(&self) -> &Option<CommitHeaderKeys> {
+        match self {
+            CommitContent::V0(v0) => &v0.header_keys,
+        }
+    }
+    pub fn author(&self) -> &Digest {
+        match self {
+            CommitContent::V0(v0) => &v0.author,
+        }
+    }
+}
+
 /// Commit object
 /// Signed by branch key, or a member key authorized to publish this commit type
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
@@ -1288,12 +1650,16 @@ pub struct CommitV0 {
 
     /// optional Commit Header
     #[serde(skip)]
-    pub header: Option<CommitHeaderV0>,
+    pub header: Option<CommitHeader>,
+
+    /// optional Commit Body
+    #[serde(skip)]
+    pub body: OnceCell<CommitBody>,
 
     /// Commit content
-    pub content: CommitContentV0,
+    pub content: CommitContent,
 
-    /// Signature over the content by the author
+    /// Signature over the content by the author: an editor (UserId)
     pub sig: Sig,
 }
@@ -1324,10 +1690,13 @@ pub enum File {
 /// Immutable data stored encrypted in a Merkle tree V0
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
 pub enum ObjectContentV0 {
-    Commit(CommitV0),
-    CommitBody(CommitBodyV0),
-    CommitHeader(CommitHeaderV0),
-    File(FileV0),
+    Commit(Commit),
+    CommitBody(CommitBody),
+    CommitHeader(CommitHeader),
+    Quorum(Quorum),
+    Signature(Signature),
+    Certificate(Certificate),
+    File(File),
 }
 
 /// Immutable data stored encrypted in a Merkle tree
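The author field's privacy scheme described above (a BLAKE3 keyed hash over the UserId, keyed per overlay) can be sketched with the blake3 crate as follows; the raw 32-byte serialization of UserId and OverlayId is an assumption:

    // Sketch only: per-overlay author digest, as documented on CommitContentV0.
    fn commit_author_digest(user_id: &[u8; 32], overlay_id: &[u8; 32]) -> blake3::Hash {
        // Derive an overlay-specific hashing key, so the same UserId produces
        // uncorrelatable digests in different overlays.
        let key = blake3::derive_key(
            "NextGraph UserId Hash Overlay Id CommitContentV0 BLAKE3 key",
            overlay_id,
        );
        blake3::keyed_hash(&key, user_id)
    }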
 
diff --git a/p2p-repo/src/utils.rs b/p2p-repo/src/utils.rs
index 5539610..a22d6ac 100644
--- a/p2p-repo/src/utils.rs
+++ b/p2p-repo/src/utils.rs
@@ -141,7 +141,7 @@ pub fn verify(content: &Vec<u8>, sig: Sig, pub_key: PubKey) -> Result<(), NgError
     let sig_bytes = match sig {
         Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(),
     };
-    let sig = Signature::from_bytes(&sig_bytes)?;
+    let sig = ed25519_dalek::Signature::from_bytes(&sig_bytes)?;
 
     Ok(pk.verify_strict(content, &sig)?)
 }
diff --git a/stores-lmdb/src/repo_store.rs b/stores-lmdb/src/repo_store.rs
index 3d65a7c..979a9e1 100644
--- a/stores-lmdb/src/repo_store.rs
+++ b/stores-lmdb/src/repo_store.rs
@@ -97,8 +97,8 @@ impl RepoStore for LmdbRepoStore {
         match serde_bare::from_slice::<Block>(&block_ser.to_bytes().unwrap()) {
             Err(_e) => Err(StorageError::InvalidValue),
-            Ok(o) => {
-                if o.id() != *block_id {
+            Ok(mut o) => {
+                if o.get_and_save_id() != *block_id {
                     log_debug!(
                         "Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}",
                         block_id,
diff --git a/stores-rocksdb/src/repo_store.rs b/stores-rocksdb/src/repo_store.rs
index fb9c27c..463fc0f 100644
--- a/stores-rocksdb/src/repo_store.rs
+++ b/stores-rocksdb/src/repo_store.rs
@@ -88,8 +88,8 @@ impl RepoStore for LmdbRepoStore {
         match serde_bare::from_slice::<Block>(&block_ser.to_bytes().unwrap()) {
             Err(_e) => Err(StorageError::InvalidValue),
-            Ok(o) => {
-                if o.id() != *block_id {
+            Ok(mut o) => {
+                if o.get_and_save_id() != *block_id {
                     log_debug!(
                         "Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}",
                         block_id,