refactor commits, permissions, capabilities, signature

pull/19/head
Niko PLP 7 months ago
parent 8faa4ae1eb
commit dbb397f966
  1. Cargo.lock (190)
  2. ng-wallet/src/lib.rs (9)
  3. ng-wallet/src/types.rs (20)
  4. ngd/src/cli.rs (2)
  5. ngd/src/main.rs (6)
  6. p2p-net/src/actors/start.rs (8)
  7. p2p-net/src/types.rs (547)
  8. p2p-repo/Cargo.toml (2)
  9. p2p-repo/src/block.rs (45)
  10. p2p-repo/src/branch.rs (113)
  11. p2p-repo/src/commit.rs (511)
  12. p2p-repo/src/object.rs (283)
  13. p2p-repo/src/repo.rs (47)
  14. p2p-repo/src/site.rs (20)
  15. p2p-repo/src/store.rs (11)
  16. p2p-repo/src/types.rs (783)
  17. p2p-repo/src/utils.rs (2)
  18. stores-lmdb/src/repo_store.rs (4)
  19. stores-rocksdb/src/repo_store.rs (4)

Cargo.lock generated (190)

@@ -12,6 +12,15 @@ dependencies = [
  "psl-types",
 ]
 
+[[package]]
+name = "addr2line"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
+dependencies = [
+ "gimli",
+]
+
 [[package]]
 name = "adler"
 version = "1.0.2"

@@ -487,6 +496,21 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
 
+[[package]]
+name = "backtrace"
+version = "0.3.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
+dependencies = [
+ "addr2line",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+]
+
 [[package]]
 name = "base64"
 version = "0.13.1"

@@ -1552,6 +1576,28 @@ dependencies = [
  "zune-inflate",
 ]
 
+[[package]]
+name = "failure"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86"
+dependencies = [
+ "backtrace",
+ "failure_derive",
+]
+
+[[package]]
+name = "failure_derive"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "synstructure",
+]
+
 [[package]]
 name = "fastbloom-rs"
 version = "0.5.3"

@@ -1588,6 +1634,31 @@ dependencies = [
  "simd-adler32",
 ]
 
+[[package]]
+name = "ff"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c4b967a3ee6ae993f0094174257d404a5818f58be79d67a1aea1ec8996d28906"
+dependencies = [
+ "byteorder",
+ "ff_derive",
+ "rand_core 0.5.1",
+]
+
+[[package]]
+name = "ff_derive"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a3776aaf60a45037a9c3cabdd8542b38693acaa3e241ff957181b72579d29feb"
+dependencies = [
+ "num-bigint",
+ "num-integer",
+ "num-traits",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "fiat-crypto"
 version = "0.1.20"

@@ -1971,6 +2042,12 @@ dependencies = [
  "weezl",
 ]
 
+[[package]]
+name = "gimli"
+version = "0.28.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253"
+
 [[package]]
 name = "gio"
 version = "0.16.7"

@@ -2093,6 +2170,17 @@ dependencies = [
  "system-deps",
 ]
 
+[[package]]
+name = "group"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f15be54742789e36f03307c8fdf0621201e1345e94f1387282024178b5e9ec8c"
+dependencies = [
+ "ff",
+ "rand 0.7.3",
+ "rand_xorshift",
+]
+
 [[package]]
 name = "gtk"
 version = "0.16.2"

@@ -2256,6 +2344,12 @@ version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
 
+[[package]]
+name = "hex_fmt"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f"
+
 [[package]]
 name = "html5ever"
 version = "0.26.0"

@@ -3282,6 +3376,17 @@ dependencies = [
  "winapi",
 ]
 
+[[package]]
+name = "num-bigint"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
 [[package]]
 name = "num-integer"
 version = "0.1.45"

@@ -3371,6 +3476,15 @@ dependencies = [
  "objc",
 ]
 
+[[package]]
+name = "object"
+version = "0.32.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "once_cell"
 version = "1.18.0"

@@ -3557,11 +3671,13 @@ dependencies = [
  "gloo-timers",
  "hex",
  "log",
+ "once_cell",
  "rand 0.7.3",
  "serde",
  "serde_bare",
  "serde_bytes",
  "slice_as_array",
+ "threshold_crypto",
  "time 0.3.23",
  "wasm-bindgen",
  "web-time",

@@ -3591,6 +3707,18 @@ dependencies = [
  "libm",
 ]
 
+[[package]]
+name = "pairing"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b8290dea210a712682cd65031dc2b34fd132cf2729def3df7ee08f0737ff5ed6"
+dependencies = [
+ "byteorder",
+ "ff",
+ "group",
+ "rand_core 0.5.1",
+]
+
 [[package]]
 name = "pango"
 version = "0.16.5"

@@ -4125,6 +4253,15 @@ dependencies = [
  "rand_core 0.5.1",
 ]
 
+[[package]]
+name = "rand_xorshift"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77d416b86801d23dde1aa643023b775c3a462efc0ed96443add11546cdf1dca8"
+dependencies = [
+ "rand_core 0.5.1",
+]
+
 [[package]]
 name = "raw-window-handle"
 version = "0.5.2"

@@ -4329,6 +4466,12 @@ dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "rustc-demangle"
+version = "0.1.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
+
 [[package]]
 name = "rustc-hash"
 version = "1.1.0"

@@ -4917,6 +5060,18 @@ dependencies = [
  "unicode-ident",
 ]
 
+[[package]]
+name = "synstructure"
+version = "0.12.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ "unicode-xid",
+]
+
 [[package]]
 name = "system-configuration"
 version = "0.5.1"

@@ -5294,6 +5449,26 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "threshold_crypto"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f708705bce37e765c37a95a8e0221a327c880d5a5a148d522552e8daa85787a"
+dependencies = [
+ "byteorder",
+ "failure",
+ "ff",
+ "group",
+ "hex_fmt",
+ "log",
+ "pairing",
+ "rand 0.7.3",
+ "rand_chacha 0.2.2",
+ "serde",
+ "tiny-keccak",
+ "zeroize",
+]
+
 [[package]]
 name = "tiff"
 version = "0.8.1"

@@ -5343,6 +5518,15 @@ dependencies = [
  "time-core",
 ]
 
+[[package]]
+name = "tiny-keccak"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
+dependencies = [
+ "crunchy",
+]
+
 [[package]]
 name = "tinyvec"
 version = "1.6.0"

@@ -5662,6 +5846,12 @@ version = "1.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
 
+[[package]]
+name = "unicode-xid"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
+
 [[package]]
 name = "unique_id"
 version = "0.1.5"

ng-wallet/src/lib.rs (9)

@@ -497,9 +497,11 @@ pub async fn connect_wallet(
         ));
         continue;
     }
-    let broker = broker.unwrap();
+    let brokers = broker.unwrap();
     let mut tried: Option<(String, String, String, Option<String>, f64)> = None;
-    for broker_info in broker {
+    //TODO: on tauri (or forward in local broker, or CLI), prefer a BoxPublic to a Domain. Domain always comes first though, so we need to reorder the list
+    //TODO: use site.bootstraps to order the list of brokerInfo.
+    for broker_info in brokers {
         match broker_info {
             BrokerInfoV0::ServerV0(server) => {
                 let url = server.get_ws_url(&location).await;

@@ -508,7 +510,7 @@ pub async fn connect_wallet(
                 if url.is_some() {
                     let url = url.unwrap();
                     if url.1.len() == 0 {
-                        // TODO deal with BoxPublic and on tauri all Box...
+                        // TODO deal with Box(Dyn)Public -> tunnel, and on tauri/forward/CLIs, deal with all Box -> direct connections (when url.1.len is > 0)
                         let res = BROKER
                             .write()
                             .await

@@ -548,6 +550,7 @@ pub async fn connect_wallet(
                 }
             }
         }
+        // Core information is discarded
        _ => {}
     }
 }

ng-wallet/src/types.rs (20)

@@ -454,15 +454,11 @@ impl WalletLogV0 {
         if self.is_last_occurrence(op.0, &op.1) != 0 {
             let _ = wallet.sites.get_mut(&site.to_string()).and_then(|site| {
                 match store_type {
-                    SiteStoreType::Public => {
-                        site.public.root_branch_def_ref = rbdr.clone()
-                    }
+                    SiteStoreType::Public => site.public.read_cap = rbdr.clone(),
                     SiteStoreType::Protected => {
-                        site.protected.root_branch_def_ref = rbdr.clone()
+                        site.protected.read_cap = rbdr.clone()
                     }
-                    SiteStoreType::Private => {
-                        site.private.root_branch_def_ref = rbdr.clone()
-                    }
+                    SiteStoreType::Private => site.private.read_cap = rbdr.clone(),
                 };
                 None::<SiteV0>
             });

@@ -472,14 +468,12 @@ impl WalletLogV0 {
         if self.is_last_occurrence(op.0, &op.1) != 0 {
             let _ = wallet.sites.get_mut(&site.to_string()).and_then(|site| {
                 match store_type {
-                    SiteStoreType::Public => {
-                        site.public.repo_secret = secret.clone()
-                    }
+                    SiteStoreType::Public => site.public.write_cap = secret.clone(),
                     SiteStoreType::Protected => {
-                        site.protected.repo_secret = secret.clone()
+                        site.protected.write_cap = secret.clone()
                     }
                     SiteStoreType::Private => {
-                        site.private.repo_secret = secret.clone()
+                        site.private.write_cap = secret.clone()
                     }
                 };
                 None::<SiteV0>

@@ -605,7 +599,7 @@ pub enum WalletOperation {
     AddThirdPartyDataV0((String, Vec<u8>)),
     RemoveThirdPartyDataV0(String),
     SetSiteRBDRefV0((PubKey, SiteStoreType, ObjectRef)),
-    SetSiteRepoSecretV0((PubKey, SiteStoreType, SymKey)),
+    SetSiteRepoSecretV0((PubKey, SiteStoreType, RepoWriteCapSecret)),
 }
 
 use std::collections::hash_map::DefaultHasher;

ngd/src/cli.rs (2)

@@ -71,7 +71,7 @@ pub(crate) struct Cli {
     )]
     pub public: Option<String>,
 
-    /// When --public is used, this option will disallow clients to connect to the public interface too. Otherwise, by default, they can. Should be used in combination with a --domain option
+    /// When --public or --dynamic is used, this option will disallow clients to connect to the public interface too. Otherwise, by default, they can. Should be used in combination with a --domain option
     #[arg(long, conflicts_with("private"))]
     pub public_without_clients: bool,

ngd/src/main.rs (6)

@@ -573,7 +573,7 @@ async fn main_inner() -> Result<(), ()> {
     {
         if args.domain_peer.is_some() {
             log_err!(
-                "--local is not allowed if --domain-peer is selected, and they both use the same port. change the port of one of them. cannot start"
+                "--local is not allowed if --domain-peer is selected, as they both use the same port. change the port of one of them. cannot start"
             );
             return Err(());
         }

@@ -591,8 +591,8 @@ async fn main_inner() -> Result<(), ()> {
         }
     }
 
-    //// --core
+    // --core
+    // core listeners always come after the domain ones, which is good as the first bootstrap in the list should be the domain (if there is also a core_with_clients that generates a BoxPublic bootstrap)
     if args.core.is_some() {
         let arg_value =
             parse_interface_and_port_for(args.core.as_ref().unwrap(), "--core", DEFAULT_PORT)?;

p2p-net/src/actors/start.rs (8)

@@ -13,7 +13,7 @@ use crate::actors::noise::Noise;
 use crate::connection::NoiseFSM;
 use crate::types::{
     AdminRequest, CoreBrokerConnect, CoreBrokerConnectResponse, CoreBrokerConnectResponseV0,
-    CoreMessage, CoreMessageV0, CoreResponseContentV0, CoreResponseV0, ExtResponse,
+    CoreMessage, CoreMessageV0, CoreResponse, CoreResponseContentV0, CoreResponseV0, ExtResponse,
 };
 use crate::{actor::*, errors::ProtocolError, types::ProtocolMessage};
 use async_std::sync::Mutex;

@@ -79,13 +79,13 @@ impl TryFrom<ProtocolMessage> for CoreBrokerConnectResponse {
     type Error = ProtocolError;
     fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
         if let ProtocolMessage::CoreMessage(CoreMessage::V0(CoreMessageV0::Response(
-            CoreResponseV0 {
+            CoreResponse::V0(CoreResponseV0 {
                 content: CoreResponseContentV0::BrokerConnectResponse(a),
                 ..
-            },
+            }),
         ))) = msg
         {
-            Ok(CoreBrokerConnectResponse::V0(a))
+            Ok(a)
         } else {
             log_debug!("INVALID {:?}", msg);
             Err(ProtocolError::InvalidValue)

p2p-net/src/types.rs (547)

@@ -137,7 +137,7 @@ pub enum BrokerServerTypeV0 {
     BoxPrivate(Vec<BindAddress>),
     BoxPublic(Vec<BindAddress>),
     BoxPublicDyn(Vec<BindAddress>), // can be empty
-    Domain(String), // accepts an option trailing ":port" number
+    Domain(String), // accepts an optional trailing ":port" number
     //Core(Vec<BindAddress>),
 }

@@ -835,7 +835,7 @@ pub enum AcceptForwardForV0 {
     PublicDomain((String, String)),
 
     /// X-Forwarded-For accepted only for clients with public addresses. First param is the domain of the proxy server
-    /// domain can take an option port (trailing `:port`)
+    /// domain can take an optional port (trailing `:port`)
     /// second param is the privKey of the PeerId of the proxy server, useful when the proxy server is load balancing to several daemons
     /// that should all use the same PeerId to answer requests
     PublicDomainPeer((String, PrivKey, String)),

@@ -960,7 +960,7 @@ pub struct ListenerV0 {
     /// when the box is behind a DMZ, and ipv6 is enabled, the private interface will get the external public IpV6. with this option we allow binding to it
     pub bind_public_ipv6: bool,
 
-    /// default to false. Set to true by --core (use --core-and-clients to override to false). only useful for a public IP listener, if the clients should use another listener like --domain or --domain-private.
+    /// default to false. Set to true by --core (use --core-with-clients to override to false). only useful for a public IP listener, if the clients should use another listener like --domain or --domain-private.
     /// do not set it on a --domain or --domain-private, as this will enable the relay_websocket feature, which should not be used except by app.nextgraph.one
     pub refuse_clients: bool,

@@ -1166,6 +1166,9 @@ pub type ForwardedPeerId = PubKey;
 pub enum PeerId {
     Direct(DirectPeerId),
     Forwarded(ForwardedPeerId),
+    /// BLAKE3 keyed hash over ForwardedPeerId
+    /// - key: BLAKE3 derive_key ("NextGraph ForwardedPeerId Hash Overlay Id BLAKE3 key", overlayId)
+    ForwardedObfuscated(Digest),
 }
 
 pub type OuterOverlayId = Digest;

@@ -1175,10 +1178,10 @@ pub type InnerOverlayId = Digest;
 /// Overlay ID
 ///
 /// - for outer overlays that need to be discovered by public key:
-///   BLAKE3 hash over the repository public key (of root repo)
+///   BLAKE3 hash over the public key of the store repo
 /// - for inner overlays:
-///   BLAKE3 keyed hash over the repository public key (of root repo)
-///   - key: BLAKE3 derive_key ("NextGraph Overlay Secret BLAKE3 key", root_secret)
+///   BLAKE3 keyed hash over the public key of the store repo
+///   - key: BLAKE3 derive_key ("NextGraph Overlay ReadCapSecret BLAKE3 key", store repo's overlay's branch ReadCapSecret)
 #[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
 pub enum OverlayId {
     Outer(OuterOverlayId),
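Not part of the patch: a minimal sketch of the two BLAKE3 derivations these doc comments describe, using the blake3 crate. The same derive_key + keyed_hash pattern also covers the new PeerId::ForwardedObfuscated above and the user_hash of ForwardedPeerAdvertV0 further down. The function names and the assumption that all inputs are raw 32-byte arrays are illustrative only:

```rust
// Sketch only. The context strings come from the doc comments above;
// the exact byte layout of the inputs is an assumption.
fn outer_overlay_id(store_repo_pubkey: &[u8; 32]) -> blake3::Hash {
    // Outer overlays must be discoverable by public key:
    // a plain BLAKE3 hash over the store repo's public key.
    blake3::hash(store_repo_pubkey)
}

fn inner_overlay_id(store_repo_pubkey: &[u8; 32], overlay_read_cap_secret: &[u8]) -> blake3::Hash {
    // Derive the keyed-hash key from the overlay branch's ReadCapSecret...
    let key = blake3::derive_key(
        "NextGraph Overlay ReadCapSecret BLAKE3 key",
        overlay_read_cap_secret,
    );
    // ...then keyed-hash the store repo's public key.
    blake3::keyed_hash(&key, store_repo_pubkey)
}
```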
@@ -1246,15 +1249,14 @@ impl OverlayAccess {
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
 pub struct InnerOverlayLink {
     /// overlay public key ID
-    pub id: Identity,
+    pub id: StoreOverlay,
 
-    /// current root branch definition commit
-    /// The ref is split in two: id and key.
-    /// The ID can be omitted if reading the overlay members should not be allowed.
-    /// In this case, the pinning broker will not be able to subscribe to the overlay root topic
-    /// and will therefor lose access if the overlay is refreshed.
-    pub root_branch_def_id: Option<ObjectId>,
-    pub root_branch_def_key: ObjectKey,
+    /// The store has a special branch called `overlay` that is used to manage access to the InnerOverlay.
+    /// Only the ReadCapSecret is needed to access the InnerOverlay;
+    /// the full ReadCap of this branch is needed in order to subscribe to the topic and decrypt the events. The branchId can be found in the branch Definition.
+    /// It can be useful to subscribe to this topic if the user is at least a reader of the store's repo, so they will be notified of a refreshReadCap on the overlay.
+    /// If the user is external to the store, they will lose access to the InnerOverlay after a RefreshReadCap of the overlay branch of the store.
+    pub store_overlay_readcap: ReadCap,
 }
 
 /// Overlay Link
@@ -1262,7 +1264,7 @@ pub struct InnerOverlayLink {
 ///
 /// Details of the overlay of an NgLink
 #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
 pub enum OverlayLink {
-    Outer(Identity),
+    Outer(StoreOverlay),
     Inner(InnerOverlayLink),
     Inherit,
 }

@@ -1434,7 +1436,7 @@ impl ClientInfo {
 /// Overlay leave request
 ///
-/// In outerOverlay: informs the broker that the overlay is not need anymore
+/// In outerOverlay: informs the broker that the overlay is not needed anymore
 /// In innerOverlay: Sent to all connected overlay participants to terminate a session
 #[derive(Clone, Copy, Debug, Serialize, Deserialize)]
 pub enum OverlayLeave {

@@ -1520,8 +1522,8 @@ pub enum SubMarker {
 /// Topic unsubscription request by a subscriber
 ///
-/// A broker unsubscribes from upstream brokers
-/// when it has no more subscribers left
+/// A broker unsubscribes from all publisher brokers in the overlay
+/// when it has no more local subscribers left
 #[derive(Clone, Copy, Debug, Serialize, Deserialize)]
 pub struct UnsubReqV0 {
     /// Topic public key

@@ -1535,30 +1537,49 @@ pub enum UnsubReq {
 }
 
 /// Content of EventV0
-/// Contains the object of newly published Commit, its optional blocks, and optional refs and their blocks.
+/// Contains the objects of a newly published Commit, its optional blocks, and optional refs and their blocks.
 /// If a block is not present in the Event, its ID should be present in block_ids and the block should be put on the emitting broker beforehand with BlocksPut.
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct EventContentV0 {
     /// Pub/sub topic
     pub topic: TopicId,
 
-    pub publisher: ForwardedPeerId,
+    // TODO: could be obfuscated (or not, if we want to be able to recall events)
+    // on public repos, should be obfuscated
+    pub publisher: PeerId,
 
     /// Commit sequence number of publisher
     pub seq: u64,
 
-    /// Blocks with encrypted content. First in the list is always the commit block, the others are optional.
+    /// Blocks with encrypted content. First in the list is always the commit block followed by its children, then its optional header and body blocks (and eventual children).
+    /// Blocks of the REFS are optional (only sent here if the user specifically wants to push them to the pub/sub).
+    /// The first block in the list MUST contain a commit_header_key.
+    /// When saved locally, the broker keeps the associated event until the topic is refreshed (the last heads retain their events);
+    /// this `blocks` list is then emptied (as the blocks are saved in the overlay storage anyway) and their IDs are kept on the side,
+    /// so that when the event needs to be sent in reply to a *TopicSyncReq, the blocks list can be regenerated from the IDs
+    /// and a valid EventContent can be sent (and its signature verified successfully).
     pub blocks: Vec<Block>,
 
-    /// Ids of additional Blocks with encrypted content that are not to be pushed in the pub/sub
+    /// Ids of additional Blocks (REFS) with encrypted content that are not to be pushed in the pub/sub;
+    /// they will be retrieved later by interested users
     pub block_ids: Vec<BlockId>,
 
-    /// Encrypted key for the Commit object (the first Block in blocks)
-    /// The key is encrypted using ChaCha20:
-    /// - key: BLAKE3 derive_key ("NextGraph Event ObjectRef ChaCha20 key",
-    ///   repo_pubkey + branch_pubkey + branch_secret + publisher)
-    /// - nonce: commit_seq
-    pub key: Option<SymKey>,
+    /// Can be:
+    /// * the encrypted key for the Commit object (the first Block in the blocks vec).
+    ///   The ObjectKey is encrypted using ChaCha20:
+    ///   - key: BLAKE3 derive_key ("NextGraph Event Commit ObjectKey ChaCha20 key",
+    ///     RepoId + BranchId + branch_secret(ReadCapSecret of the branch) + publisher)
+    ///   - nonce: commit_seq
+    /// * if it is a CertificateRefresh, both the blocks and block_ids vectors are empty,
+    ///   and the key here contains an encrypted ObjectRef to the new Certificate.
+    ///   The whole ObjectRef is encrypted (including the ID) to avoid correlation of topics that will have the same Certificate ID (belong to the same repo).
+    ///   Encrypted using ChaCha20, with:
+    ///   - key: BLAKE3 derive_key ("NextGraph Event Certificate ObjectRef ChaCha20 key",
+    ///     RepoId + BranchId + branch_secret(ReadCapSecret of the branch) + publisher).
+    ///     It is the same key as above, because the commit_seq will be different (incremented anyway).
+    ///   - nonce: commit_seq
+    #[serde(with = "serde_bytes")]
+    pub key: Vec<u8>,
 }
 
 /// Pub/sub event published in a topic
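Not part of the patch: a sketch of the commit-key encryption described in the new `key` field doc, using the blake3 and chacha20 (0.9-style API) crates. The concatenation order follows the comment; the serialization of the IDs and the placement of commit_seq in the nonce are assumptions:

```rust
use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::ChaCha20;

// Hypothetical helper: applies the keystream to the serialized ObjectKey
// (or ObjectRef, for a CertificateRefresh) in place. Running it twice
// round-trips, since ChaCha20 is a stream cipher.
fn event_key_cipher(
    repo_id: &[u8; 32],
    branch_id: &[u8; 32],
    branch_read_cap_secret: &[u8; 32],
    publisher: &[u8; 32],
    commit_seq: u64,
    key_bytes: &mut [u8],
) {
    // key: BLAKE3 derive_key over RepoId + BranchId + ReadCapSecret + publisher
    let mut material = Vec::with_capacity(128);
    material.extend_from_slice(repo_id);
    material.extend_from_slice(branch_id);
    material.extend_from_slice(branch_read_cap_secret);
    material.extend_from_slice(publisher);
    let key = blake3::derive_key("NextGraph Event Commit ObjectKey ChaCha20 key", &material);

    // nonce: commit_seq (assumed little-endian, in the first 8 of the 12 nonce bytes)
    let mut nonce = [0u8; 12];
    nonce[..8].copy_from_slice(&commit_seq.to_le_bytes());

    let mut cipher = ChaCha20::new(&key.into(), &nonce.into());
    cipher.apply_keystream(key_bytes);
}
```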
@@ -1584,7 +1605,7 @@ pub enum Event {
 /// from a subscriber to one publisher at a time.
 /// fanout is always 1
 /// if result is none, tries another path if several paths available locally
-/// answered with a BlockResult
+/// answered with a stream of BlockResult
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct BlockSearchTopicV0 {
     /// Topic to forward the request in

@@ -1612,7 +1633,7 @@ pub enum BlockSearchTopic {
 /// Block search along a random walk in the overlay
 /// fanout is always 1
 /// if result is none, tries another path if several paths available locally
-/// answered with a BlockResult
+/// answered with a stream of BlockResult
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct BlockSearchRandomV0 {
     /// List of Block IDs to request

@@ -1636,7 +1657,7 @@ pub enum BlockSearchRandom {
 }
 
 /// Response to a BlockSearch* request
-///
+/// can be a stream
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct BlockResultV0 {
     /// Resulting Blocks(s)

@@ -1644,6 +1665,7 @@ pub struct BlockResultV0 {
 }
 
 /// Response to a BlockSearch* request
+/// can be a stream
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum BlockResult {
     V0(BlockResultV0),

@@ -1651,8 +1673,7 @@ pub enum BlockResult {
 /// Topic synchronization request
 ///
-/// In response a stream of `Block`s of the requested Objects are sent
-/// that are not present in the requestor's known heads and commits
+/// In response, a stream of `TopicSyncRes` containing the missing Commits or Events is sent
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct TopicSyncReqV0 {
     /// Topic public key

@@ -1661,9 +1682,9 @@ pub struct TopicSyncReqV0 {
     /// Fully synchronized until these commits
     pub known_heads: Vec<ObjectId>,
 
-    /// Known commit IDs since known_heads
-    // TODO: is this going to be used?
-    pub known_commits: BloomFilter,
+    /// Stop synchronizing when these commits are met.
+    /// If empty, the local HEAD at the responder is used instead.
+    pub target_heads: Vec<ObjectId>,
 }
 
 /// Topic synchronization request

@@ -1683,11 +1704,6 @@ impl TopicSyncReq {
             TopicSyncReq::V0(o) => &o.known_heads,
         }
     }
-
-    pub fn known_commits(&self) -> &BloomFilter {
-        match self {
-            TopicSyncReq::V0(o) => &o.known_commits,
-        }
-    }
 }
 
 /// Status of a Forwarded Peer, sent in the Advert

@@ -1702,11 +1718,12 @@ pub enum PeerStatus {
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct ForwardedPeerAdvertV0 {
     /// PeerAdvert received from Client
+    // TODO: this could be obfuscated when the user doesn't want to recall events.
     pub peer_advert: PeerAdvertV0,
 
     /// Hashed user Id, used to prevent concurrent connection from different brokers
     /// BLAKE3 keyed hash over the UserId
-    /// - key: BLAKE3 derive_key ("NextGraph Overlay Id BLAKE3 key", overlayId)
+    /// - key: BLAKE3 derive_key ("NextGraph UserId Hash Overlay Id ForwardedPeerAdvertV0 BLAKE3 key", overlayId) // this will always be an Inner overlay
     pub user_hash: Digest,
 
     /// whether the Advert is about connection or disconnection
@@ -1915,12 +1932,27 @@ pub struct CoreOverlayJoinedAdvertV0 {
     pub overlay: OverlayAdvertV0,
 }
 
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreBrokerJoinedAdvert {
+    V0(CoreBrokerJoinedAdvertV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreBrokerLeftAdvert {
+    V0(CoreBrokerLeftAdvertV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreOverlayJoinedAdvert {
+    V0(CoreOverlayJoinedAdvertV0),
+}
+
 /// Content of CoreAdvert V0
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum CoreAdvertContentV0 {
-    BrokerJoined(CoreBrokerJoinedAdvertV0),
-    BrokerLeft(CoreBrokerLeftAdvertV0),
-    OverlayJoined(CoreOverlayJoinedAdvertV0),
+    BrokerJoined(CoreBrokerJoinedAdvert),
+    BrokerLeft(CoreBrokerLeftAdvert),
+    OverlayJoined(CoreOverlayJoinedAdvert),
 }
 
 /// CoreAdvert V0

@@ -1955,7 +1987,7 @@ pub struct OverlayAdvertMarkerV0 {
     /// path from the new broker who started a session, to the broker that is sending the marker
     pub path: Vec<DirectPeerId>,
 
-    /// randomly generated nonce used for the reply (a ReturnPathTimingMarker) that will be sent after receiving the marker
+    /// randomly generated nonce used for the reply (a ReturnPathTimingMarker) that will be sent back after this marker has been received on the other end
     pub reply_nonce: u64,
 }

@@ -1963,17 +1995,19 @@ pub struct OverlayAdvertMarkerV0 {
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct CoreBlockGetV0 {
     /// Block ID to request
-    pub id: BlockId,
+    pub ids: Vec<BlockId>,
 
     /// Whether or not to include all children recursively
     pub include_children: bool,
 
-    /// randomly generated number by requester, used for sending reply. Purpose is to defeat replay attacks in the overlay
-    /// the requester keeps track of req_nonce and destination peerid.
+    /// randomly generated number by requester, used for sending reply.
+    /// the requester keeps track of req_nonce and requested peerid.
+    /// used for handling the stream
     pub req_nonce: u64,
 }
 
 /// Core Block Result V0
+/// can be a stream
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct CoreBlockResultV0 {
     /// Resulting Object(s)

@@ -1993,13 +2027,33 @@ pub struct ReturnPathTimingAdvertV0 {
     pub nonce: u64,
 }
 
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum OverlayAdvertMarker {
+    V0(OverlayAdvertMarkerV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ReturnPathTimingAdvert {
+    V0(ReturnPathTimingAdvertV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreBlockGet {
+    V0(CoreBlockGetV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreBlockResult {
+    V0(CoreBlockResultV0),
+}
+
 /// Content of CoreDirectMessage V0
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum CoreDirectMessageContentV0 {
-    OverlayAdvertMarker(OverlayAdvertMarkerV0),
-    ReturnPathTimingAdvert(ReturnPathTimingAdvertV0),
-    BlockGet(CoreBlockGetV0),
-    BlockResult(CoreBlockResultV0),
+    OverlayAdvertMarker(OverlayAdvertMarker),
+    ReturnPathTimingAdvert(ReturnPathTimingAdvert),
+    BlockGet(CoreBlockGet),
+    BlockResult(CoreBlockResult),
     //PostInbox,
     //PartialSignature,
     //ClientDirectMessage //for messages between forwarded or direct peers

@@ -2050,11 +2104,13 @@ pub enum CoreBrokerConnectResponse {
 impl CoreBrokerConnect {
     pub fn core_message(&self, id: i64) -> CoreMessage {
         match self {
-            CoreBrokerConnect::V0(v0) => CoreMessage::V0(CoreMessageV0::Request(CoreRequestV0 {
-                padding: vec![],
-                id,
-                content: CoreRequestContentV0::BrokerConnect(v0.clone()),
-            })),
+            CoreBrokerConnect::V0(v0) => {
+                CoreMessage::V0(CoreMessageV0::Request(CoreRequest::V0(CoreRequestV0 {
+                    padding: vec![],
+                    id,
+                    content: CoreRequestContentV0::BrokerConnect(CoreBrokerConnect::V0(v0.clone())),
+                })))
+            }
         }
     }
 }

@@ -2066,7 +2122,7 @@ pub type CoreBrokerDisconnectV0 = ();
 
 /// // replied with an emptyResponse, and an error code if OverlayId not present on remote broker
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum CoreOverlayJoinV0 {
-    Inner(OverlayAdvertV0),
+    Inner(OverlayAdvert),
     Outer(Digest),
 }
@@ -2075,6 +2131,7 @@ pub enum CoreOverlayJoinV0 {
 pub enum OuterOverlayResponseContentV0 {
     EmptyResponse(()),
     Block(Block),
+    TopicSyncRes(TopicSyncRes),
     //PostInboxResponse(PostInboxResponse),
 }

@@ -2107,13 +2164,14 @@ pub struct OuterOverlayResponseV0 {
 /// Core Topic synchronization request
 ///
-/// behaves like BlockSearchTopic (primarily searches among the publishers)
+/// behaves like BlockSearchTopic (primarily searches among the publishers, except if search_in_subs is set to true)
 /// fanout is 1 for now
-/// In response a stream of `Block`s of the requested Objects are sent
-/// that are not present in the requestor's known heads and commits
 ///
-/// if some target_heads are not found locally, then all successors of known_heads are sent anyway.
-/// Then this temporary HEAD is used to propagate the CoreTopicSyncReq to upstream brokers
+/// If some target_heads are not found locally, all successors of known_heads are sent anyway,
+/// and then this temporary HEAD is used to propagate/fanout the CoreTopicSyncReq to upstream brokers.
+///
+/// Answered with one or many TopicSyncRes: a stream of `Block`s or Events of the commits.
+/// If the responder has an Event for the commit(s) in its HEAD, it will send the event instead of the plain commit's blocks.
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct CoreTopicSyncReqV0 {
     /// Topic public key

@@ -2125,12 +2183,9 @@ pub struct CoreTopicSyncReqV0 {
     /// Fully synchronized until these commits
     pub known_heads: Vec<ObjectId>,
 
-    /// Stop synchronizing when these commits are met
+    /// Stop synchronizing when these commits are met.
+    /// If empty, the local HEAD at the responder is used instead.
     pub target_heads: Vec<ObjectId>,
-
-    /// Known commit IDs since known_heads
-    // TODO: is this going to be used?
-    pub known_commits: BloomFilter,
 }
 
 /// Topic synchronization request

@@ -2139,16 +2194,45 @@ pub enum CoreTopicSyncReq {
     V0(CoreTopicSyncReqV0),
 }
 
+/// Topic synchronization response V0
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum TopicSyncResV0 {
+    Event(Event),
+    Block(Block),
+}
+
+/// Topic synchronization response
+/// It is a stream of blocks and/or events.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum TopicSyncRes {
+    V0(TopicSyncResV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreBrokerDisconnect {
+    V0(CoreBrokerDisconnectV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreOverlayJoin {
+    V0(CoreOverlayJoinV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum OuterOverlayRequest {
+    V0(OuterOverlayRequestV0),
+}
+
 /// Content of CoreRequest V0
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum CoreRequestContentV0 {
-    BrokerConnect(CoreBrokerConnectV0),
-    BrokerDisconnect(CoreBrokerDisconnectV0),
-    OverlayJoin(CoreOverlayJoinV0),
-    BlockSearchTopic(BlockSearchTopicV0),
-    BlockSearchRandom(BlockSearchRandomV0),
-    TopicSyncReq(CoreTopicSyncReqV0),
-    OuterOverlayRequest(OuterOverlayRequestV0),
+    BrokerConnect(CoreBrokerConnect),
+    BrokerDisconnect(CoreBrokerDisconnect),
+    OverlayJoin(CoreOverlayJoin),
+    BlockSearchTopic(BlockSearchTopic),
+    BlockSearchRandom(BlockSearchRandom),
+    TopicSyncReq(CoreTopicSyncReq),
+    OuterOverlayRequest(OuterOverlayRequest),
 }
 
 /// CoreRequest V0
@@ -2178,12 +2262,18 @@ pub struct CoreBrokerConnectResponseV0 {
     pub errors: Vec<OverlayId>,
 }
 
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum OuterOverlayResponse {
+    V0(OuterOverlayResponseV0),
+}
+
 /// Content of CoreResponse V0
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum CoreResponseContentV0 {
-    BrokerConnectResponse(CoreBrokerConnectResponseV0),
-    BlockResult(BlockResultV0),
-    OuterOverlayResponse(OuterOverlayResponseV0),
+    BrokerConnectResponse(CoreBrokerConnectResponse),
+    BlockResult(BlockResult),
+    TopicSyncRes(TopicSyncRes),
+    OuterOverlayResponse(OuterOverlayResponse),
     EmptyResponse(()),
 }

@@ -2226,15 +2316,30 @@ pub struct OuterOverlayMessageV0 {
     pub padding: Vec<u8>,
 }
 
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreAdvert {
+    V0(CoreAdvertV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum CoreDirectMessage {
+    V0(CoreDirectMessageV0),
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum OuterOverlayMessage {
+    V0(OuterOverlayMessageV0),
+}
+
 /// CoreMessageV0
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum CoreMessageV0 {
-    Request(CoreRequestV0),
-    Response(CoreResponseV0),
-    Advert(CoreAdvertV0),
-    Direct(CoreDirectMessageV0),
-    InnerOverlay(InnerOverlayMessageV0),
-    OuterOverlay(OuterOverlayMessageV0),
+    Request(CoreRequest),
+    Response(CoreResponse),
+    Advert(CoreAdvert),
+    Direct(CoreDirectMessage),
+    InnerOverlay(InnerOverlayMessage),
+    OuterOverlay(OuterOverlayMessage),
 }
 
 /// Core message

@@ -2475,6 +2580,8 @@ pub struct OpenRepoV0 {
     pub overlay: OverlayAccess,
 
     /// Broker peers to connect to in order to join the overlay.
+    /// Can be empty for a private store (the broker will not connect to any other broker),
+    /// but if the private repo is pinned on other brokers, those brokers should be entered here for syncing.
     pub peers: Vec<PeerAdvert>,
 
     /// Maximum number of peers to connect to for this overlay (only valid for an inner (RW/WO) overlay)

@@ -2502,15 +2609,6 @@ impl OpenRepo {
     }
 }
 
-/// Block pinning strategy. When Pinning a repo, user can choose to Pin locally on the broker:
-/// all their published commits (if they are publisher) or all the commits of all the users.
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum BlockPinningStrategy {
-    MyCommits,
-    AllCommits,
-    None,
-}
-
 /// Request to pin a repo on the broker.
 /// When the client disconnects, the subscriptions and publisherAdvert of the topics will remain active on the broker,
 #[derive(Clone, Debug, Serialize, Deserialize)]

@@ -2542,11 +2640,8 @@ pub struct PinRepoV0 {
     /// only possible with inner (RW or WO) overlays.
     /// If the repo has previously been opened (during the same session) then rw_topics info can be omitted
     pub rw_topics: Vec<PublisherAdvert>,
-    // TODO pub inbox_proof
 
-    /// Pin incoming commits' blocks (for subscribed topics)
-    pub pin_all_events: bool,
-    // TODO pub signer_proof
+    // TODO pub signer_proof
+    // TODO pub inbox_proof
 }
 
 /// Request to pin a repo
@@ -2563,6 +2658,35 @@ impl PinRepo {
     }
 }
 
+/// Request to refresh the Pinning of a previously pinned repo.
+/// It can consist of updating the expose_outer, the list of ro_topics and/or rw_topics,
+/// and, in case of a ban_member, the broker will effectively flush the topics locally after all local members except the banned one have refreshed.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct RefreshPinRepoV0 {
+    /// The new PinRepo info
+    pub pin: PinRepo,
+
+    /// optional hashed member ID that should be banned
+    pub ban_member: Option<Digest>,
+
+    /// When banning, the list of topics that are to be flushed (once all the local members have left, except the one to be banned).
+    /// All the honest local members have to send this list in order for the banned one to be effectively banned.
+    /// For each Topic, a signature over the hashed UserId to ban, by the Topic private key.
+    /// The banning process on the broker is meant to flush topics that would remain dangling if the malicious member did not unpin them after being removed from the members of the repo.
+    /// The userId of the banned user is revealed to the local broker where it was attached, which is a breach of privacy deemed acceptable,
+    /// as only a broker that already knew the userId will enforce it, and
+    /// that broker might be interested to know that the offending user was banned from a repo, as only malicious users are banned.
+    /// The broker might also discard this information and just proceed with the flush without much ado.
+    /// Of course, if the broker is controlled by the malicious user, it might not proceed with the ban/flush. But who cares? That broker will keep old data forever, but it is a malicious broker anyway.
+    pub flush_topics: Vec<(TopicId, Sig)>,
+}
+
+/// Request to refresh the pinning of a repo
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum RefreshPinRepo {
+    V0(RefreshPinRepoV0),
+}
+
 /// Request to unpin a repo on the broker.
 /// When the client disconnects, the subscriptions and publisherAdvert of the topics will be removed on the broker
 /// (for that user only; other users might continue to have the repo pinned)

@@ -2680,16 +2804,18 @@ pub enum TopicUnsub {
 }
 
 /// Request a Block by ID
+/// commit_header_key is always set to None in the reply when the request is made on the OuterOverlay of protected or Group overlays
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct BlockGetV0 {
-    /// Block ID to request
-    pub id: BlockId,
+    /// Block IDs to request
+    pub ids: Vec<BlockId>,
 
     /// Whether or not to include all children recursively
     pub include_children: bool,
 
-    /// Topic the object is referenced from
-    pub topic: Option<PubKey>,
+    /// Topic the object is referenced from, if it is known by the requester.
+    /// Can be used to do a BlockSearchTopic in the core overlay.
+    pub topic: Option<TopicId>,
 }
 
 /// Request an object by ID

@@ -2699,9 +2825,9 @@ pub enum BlockGet {
 }
 
 impl BlockGet {
-    pub fn id(&self) -> BlockId {
+    pub fn ids(&self) -> &Vec<BlockId> {
         match self {
-            BlockGet::V0(o) => o.id,
+            BlockGet::V0(o) => &o.ids,
         }
     }
     pub fn include_children(&self) -> bool {

@@ -2738,6 +2864,7 @@ impl BlocksPut {
 }
 
 /// Request to know if some blocks are present locally
+/// used by the client before publishing an event, to know what to push
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct BlocksExistV0 {
     /// Ids of Blocks to check
@@ -2826,27 +2953,27 @@ impl ObjectDel {
 /// Content of `ClientRequestV0`
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum ClientRequestContentV0 {
-    OpenRepo(OpenRepoV0),
-    PinRepo(PinRepoV0),
-    UnpinRepo(UnpinRepoV0),
-    RepoPinStatusReq(RepoPinStatusReqV0),
+    OpenRepo(OpenRepo),
+    PinRepo(PinRepo),
+    UnpinRepo(UnpinRepo),
+    RepoPinStatusReq(RepoPinStatusReq),
 
     // once repo is opened or pinned:
-    TopicSub(TopicSubV0),
-    TopicUnsub(TopicUnsubV0),
-    BlocksExist(BlocksExistV0),
-    BlockGet(BlockGetV0),
-    TopicSyncReq(TopicSyncReqV0),
+    TopicSub(TopicSub),
+    TopicUnsub(TopicUnsub),
+    BlocksExist(BlocksExist),
+    BlockGet(BlockGet),
+    TopicSyncReq(TopicSyncReq),
 
     // For Pinned Repos only :
-    ObjectPin(ObjectPinV0),
-    ObjectUnpin(ObjectUnpinV0),
-    ObjectDel(ObjectDelV0),
+    ObjectPin(ObjectPin),
+    ObjectUnpin(ObjectUnpin),
+    ObjectDel(ObjectDel),
 
     // For InnerOverlay's only :
-    BlocksPut(BlocksPutV0),
-    PublishEvent(EventV0),
+    BlocksPut(BlocksPut),
+    PublishEvent(Event),
 }
 
 /// Broker overlay request
 #[derive(Clone, Debug, Serialize, Deserialize)]

@@ -2918,8 +3045,9 @@ impl BlocksFound {
 pub enum ClientResponseContentV0 {
     EmptyResponse,
     Block(Block),
-    BlocksFound(BlocksFoundV0),
-    RepoPinStatus(RepoPinStatusV0),
+    TopicSyncRes(TopicSyncRes),
+    BlocksFound(BlocksFound),
+    RepoPinStatus(RepoPinStatus),
 }
 
 /// Response to a `ClientRequest`

@@ -2975,6 +3103,7 @@ pub enum ClientMessageContentV0 {
     ClientRequest(ClientRequest),
     ClientResponse(ClientResponse),
     ForwardedEvent(Event),
+    ForwardedBlock(Block),
 }
 
 /// Broker message for an overlay
 #[derive(Clone, Debug, Serialize, Deserialize)]

@@ -3030,7 +3159,8 @@ impl ClientMessage {
             ClientMessage::V0(o) => match &o.content {
                 ClientMessageContentV0::ClientResponse(r) => r.id(),
                 ClientMessageContentV0::ClientRequest(r) => r.id(),
-                ClientMessageContentV0::ForwardedEvent(_) => {
+                ClientMessageContentV0::ForwardedEvent(_)
+                | ClientMessageContentV0::ForwardedBlock(_) => {
                     panic!("it is an event")
                 }
             },

@@ -3041,7 +3171,8 @@ impl ClientMessage {
             ClientMessage::V0(o) => match &mut o.content {
                 ClientMessageContentV0::ClientResponse(ref mut r) => r.set_id(id),
                 ClientMessageContentV0::ClientRequest(ref mut r) => r.set_id(id),
-                ClientMessageContentV0::ForwardedEvent(_) => {
+                ClientMessageContentV0::ForwardedEvent(_)
+                | ClientMessageContentV0::ForwardedBlock(_) => {
                     panic!("it is an event")
                 }
             },

@@ -3051,10 +3182,9 @@ impl ClientMessage {
         match self {
             ClientMessage::V0(o) => match &o.content {
                 ClientMessageContentV0::ClientResponse(r) => r.result(),
-                ClientMessageContentV0::ClientRequest(r) => {
-                    panic!("it is not a response");
-                }
-                ClientMessageContentV0::ForwardedEvent(_) => {
+                ClientMessageContentV0::ClientRequest(_)
+                | ClientMessageContentV0::ForwardedEvent(_)
+                | ClientMessageContentV0::ForwardedBlock(_) => {
                     panic!("it is not a response");
                 }
             },

@@ -3064,10 +3194,9 @@ impl ClientMessage {
         match self {
             ClientMessage::V0(o) => match &o.content {
                 ClientMessageContentV0::ClientResponse(r) => r.block(),
-                ClientMessageContentV0::ClientRequest(r) => {
-                    panic!("it is not a response");
-                }
-                ClientMessageContentV0::ForwardedEvent(_) => {
+                ClientMessageContentV0::ClientRequest(_)
+                | ClientMessageContentV0::ForwardedEvent(_)
+                | ClientMessageContentV0::ForwardedBlock(_) => {
                     panic!("it is not a response");
                 }
             },
@ -3463,42 +3592,51 @@ impl From<AuthResult> for ProtocolMessage {
} }
// //
// DIRECT / OUT-OF-BAND MESSAGES // LINKS
// //
/// Link/invitation to the repository /// Link to a repository
/// Consists of an identifier (repoid), a ReadCap or WriteCap, and a locator (peers and overlayLink)
/// Those capabilities are not durable: They can be refreshed by the members and previously shared Caps will become obsolete/revoked.
/// As long as the user is a member of the repo and subscribes to the root topic (of the repo, and of the store if needed/applicable), they will receive the updated capabilities.
/// But if they don't subscribe, they will lose access after the refresh.
/// For durable read capabilities of non-members, see PermaReadCap.
/// In most cases, the link is shared and the recipient opens it and subscribes soon afterward.
/// Perma capabilities are needed only when the link is stored on disk and kept there unopened for a long period.
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct RepoLinkV0 { pub struct RepoLinkV0 {
/// Repository public key ID /// Repository ID
pub id: Identity, pub id: RepoId,
pub overlay: OverlayLink, /// read capability for the whole repo
/// current (at the time of sharing the link) root branch definition commit
pub read_cap: ReadCap,
/// Repository secret. Only set for editors /// Write capability secret. Only set for editors. in this case, overlay MUST be set to an InnerOverlay
pub repo_secret: Option<SymKey>, pub write_cap_secret: Option<RepoWriteCapSecret>,
/// current root branch definition commit /// Current overlay link, used to join the overlay
pub root_branch_def_ref: ObjectRef, pub overlay: OverlayLink,
/// Peer brokers to connect to /// Peer brokers to connect to
pub peers: Vec<PeerAdvert>, pub peers: Vec<PeerAdvert>,
} }
/// Link/invitation to the repository /// Link to a repository
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub enum RepoLink { pub enum RepoLink {
V0(RepoLinkV0), V0(RepoLinkV0),
} }
impl RepoLink { impl RepoLink {
pub fn id(&self) -> &Identity { pub fn id(&self) -> &RepoId {
match self { match self {
RepoLink::V0(o) => &o.id, RepoLink::V0(o) => &o.id,
} }
} }
pub fn secret(&self) -> &Option<SymKey> { pub fn write_cap_secret(&self) -> &Option<RepoWriteCapSecret> {
match self { match self {
RepoLink::V0(o) => &o.repo_secret, RepoLink::V0(o) => &o.write_cap_secret,
} }
} }
pub fn peers(&self) -> &Vec<PeerAdvert> { pub fn peers(&self) -> &Vec<PeerAdvert> {
@ -3508,43 +3646,122 @@ impl RepoLink {
} }
} }
/// Link to object(s) or to a branch from a repository /// The latest ReadCap of the branch (or main branch) will be downloaded from the outerOverlay, if the peer brokers listed below allow it.
/// that can be shared to non-members /// The snapshot can be downloaded instead
/// This locator is durable, because the public site are served differently by brokers.
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ObjectLinkV0 { pub struct PublicRepoLocatorV0 {
/// Request to send to an overlay peer /// Repository ID
pub req: ExtRequest, pub repo: RepoId,
/// optional branchId to access. a specific public branch,
/// if not set, the main branch of the repo will be used.
pub branch: Option<BranchId>,
/// optional commits of head to access.
/// if not set, the main branch of the repo will be used.
pub heads: Vec<ObjectRef>,
/// optional snapshot to download, in order to display the content quicker to end-user.
pub snapshot: Option<ObjectRef>,
/// The public site store
pub public_store: PubKey,
/// Peer brokers to connect to
pub peers: Vec<PeerAdvert>,
}
/// Keys for the root blocks of the requested objects /// Link to a public repository
pub keys: Vec<ObjectRef>, #[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PublicRepoLocator {
V0(PublicRepoLocatorV0),
} }
-/// Link to object(s) or to a branch from a repository
-/// that can be shared to non-members
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum ObjectLink {
-    V0(ObjectLinkV0),
-}
+/// Read access to a branch of a Public, Protected or Group store.
+/// The overlay to join can be the outer or the inner one, depending on what was offered in the link.
+/// The difference between the two is that in the outer overlay, only one broker is contacted.
+/// In the inner overlay, all the publisher's brokers are contacted, so subscription to the pub/sub is more reliable and less prone to outage.
+/// This is not a durable link. If the topic has been refreshed, the pub/sub can no longer be subscribed to,
+/// but TopicSyncReq will still work (answering the commits up until the moment the topic was refreshed),
+/// and the optional heads will always be retrievable.
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ReadBranchLinkV0 {
+    /// Repository ID
+    pub repo: RepoId,
+
+    pub branch: BranchId, // must match the one in read_cap
+
+    /// an optional list of heads that can be fetched in this branch,
+    /// useful if a specific head is to be shared
+    pub heads: Vec<ObjectRef>,
+
+    /// read capability for the branch:
+    /// the current (at the time of sharing the link) branch definition commit
+    pub read_cap: ReadCap,
+
+    /// Current overlay link, used to join the overlay; most of the time, an outerOverlay is preferred
+    pub overlay: OverlayLink,
+
+    /// Peer brokers to connect to
+    pub peers: Vec<PeerAdvert>,
+}
+
-/// Owned repository with private key
+/// Link to a branch of a repository
#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct RepoKeysV0 {
-    /// Repository private key
-    pub key: PrivKey,
+pub enum ReadBranchLink {
+    V0(ReadBranchLinkV0),
+}
-    /// Repository secret
-    pub secret: SymKey,
+/// Obtains one or more objects of a repo (Commit, File) by their ID.
+/// On an outerOverlay, the header is always emptied (there is no way to reconstruct the DAG of commits), except on public overlays or if a topicId is provided.
+/// If the intent is to share a whole DAG of commits at a definite CommitID/HEAD, then ReadBranchLink should be used instead (or PublicRepoLocator if it is a public site).
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ObjectLinkV0 {
+    /// Repository ID: not used to make the request, but useful for commits, to know which repo they are from without needing to fetch and open the full DAG of commits.
+    /// (The one given here might be wrong; only when opening the DAG can the real repo be known. Also note that on the outerOverlay of non-public stores, the DAG is not accessible.)
+    /// It could be omitted, especially if the objects are files, as files are content-addressable and belong to an overlay but not to a specific repo or topic.
+    pub repo: Option<RepoId>,
+
+    /// An optional topic that will be used to retrieve the Certificate of a commit, if needed
+    /// (the topic has to be checked against the one inside the commit; the one given here might be wrong. It is provided as an optimization),
+    /// or that can be used to help with BlockSearchTopic.
+    /// If the topic is provided, a TopicSyncReq can be performed, and the causal past of the commit will appear (by repeated tries while narrowing down on the ancestors),
+    /// hence defeating the "emptied header" protection.
+    pub topic: Option<TopicId>,
+
+    pub objects: Vec<ObjectRef>,
+
+    /// Overlay to join
+    pub overlay: OverlayLink,
+
-    /// Peers to connect to
+    /// Peer brokers to connect to
    pub peers: Vec<PeerAdvert>,
}

-/// Owned repository with private key
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub enum RepoKeys {
-    V0(RepoKeysV0),
-}
+/// Link to a specific commit, without its causal past
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ObjectLink {
+    V0(ObjectLinkV0),
+}
/// NextGraph Link V0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum NgLinkV0 {
Repo(RepoLink),
PublicRepo(PublicRepoLocator),
Branch(ReadBranchLink),
Object(ObjectLink),
}
/// NextGraph Link
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum NgLink {
V0(NgLinkV0),
}
// TODO: PermaLinks and PostInbox (and ExtRequests)
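A hedged sketch (not part of this commit) of dispatching on the four link kinds; the `open_link` function and the per-variant comments are illustrative assumptions, only the types come from the definitions above:

fn open_link(link: NgLink) {
    match link {
        NgLink::V0(NgLinkV0::Repo(repo)) => {
            // editors carry a write capability secret; readers get None
            let _can_write = repo.write_cap_secret().is_some();
        }
        NgLink::V0(NgLinkV0::PublicRepo(_locator)) => {
            // durable: fetch the latest ReadCap of the public branch from the outer overlay
        }
        NgLink::V0(NgLinkV0::Branch(_branch)) => {
            // join the offered overlay; subscribe to the topic if it hasn't been refreshed
        }
        NgLink::V0(NgLinkV0::Object(_objects)) => {
            // fetch the listed objects by ID from the overlay
        }
    }
}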
#[cfg(test)]
mod test {

@@ -27,8 +27,10 @@ web-time = "0.2.0"
wasm-bindgen = "0.2"
slice_as_array = "1.1.0"
curve25519-dalek = "3.2.0"
+threshold_crypto = "0.4.0"
zeroize = { version = "1.6.0", features = ["zeroize_derive"] }
time = { version= "0.3.23", features = ["formatting"] }
+once_cell = "1.17.1"

[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
debug_print = "1.0.0"

@@ -16,16 +16,18 @@ use crate::types::*;
impl BlockV0 {
    pub fn new(
        children: Vec<BlockId>,
-        header_ref: Option<ObjectRef>,
+        mut header_ref: Option<CommitHeaderRef>,
        content: Vec<u8>,
        key: Option<SymKey>,
    ) -> BlockV0 {
-        let (commit_header_id, commit_header_key) = header_ref.map_or((None, None), |obj_ref| {
-            (Some(obj_ref.id), Some(obj_ref.key))
-        });
+        let (commit_header, commit_header_key) = header_ref
+            .take()
+            .map_or((CommitHeaderObject::None, None), |obj_ref| {
+                (obj_ref.obj, Some(obj_ref.key))
+            });
        let bc = BlockContentV0 {
            children,
-            commit_header_id,
+            commit_header: commit_header,
            encrypted_content: content,
        };
        let mut b = BlockV0 {
@@ -65,12 +67,12 @@ impl BlockContent {
        }
    }

-    /// Get the header id
-    pub fn header_id(&self) -> &Option<ObjectId> {
-        match self {
-            BlockContent::V0(bc) => &bc.commit_header_id,
-        }
-    }
+    // /// Get the header id
+    // pub fn header_id(&self) -> &Option<ObjectId> {
+    //     match self {
+    //         BlockContent::V0(bc) => &bc.commit_header_id,
+    //     }
+    // }

    /// Get the children
    pub fn children(&self) -> &Vec<BlockId> {
@@ -83,7 +85,7 @@ impl BlockContent {
impl Block {
    pub fn new(
        children: Vec<BlockId>,
-        header_ref: Option<ObjectRef>,
+        header_ref: Option<CommitHeaderRef>,
        content: Vec<u8>,
        key: Option<SymKey>,
    ) -> Block {
@@ -136,13 +138,20 @@ impl Block {
        }
    }

-    /// Get the header
-    pub fn header_ref(&self) -> Option<ObjectRef> {
+    /// Get the header reference
+    pub fn header_ref(&self) -> Option<CommitHeaderRef> {
        match self {
-            Block::V0(b) => b.commit_header_key.as_ref().map(|key| ObjectRef {
-                key: key.clone(),
-                id: b.content.header_id().unwrap().clone(),
-            }),
+            Block::V0(b) => match b.commit_header_key.as_ref() {
+                Some(key) => match b.content.commit_header_obj() {
+                    CommitHeaderObject::None => None,
+                    _ => Some(CommitHeaderRef {
+                        obj: b.content.commit_header_obj().clone(),
+                        key: key.clone(),
+                    }),
+                },
+                None => None,
+            },
        }
    }
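A hedged usage sketch of the new header reference (only the CommitHeaderRef/CommitHeaderObject shapes come from the diff above; the `describe_header` function is illustrative):

fn describe_header(block: &Block) {
    match block.header_ref() {
        None => { /* block carries no commit header */ }
        Some(CommitHeaderRef { obj: CommitHeaderObject::Id(id), .. }) => {
            // large header: stored as a separate object, fetched by ID
            let _ = id;
        }
        Some(CommitHeaderRef { obj: CommitHeaderObject::EncryptedContent(bytes), .. }) => {
            // small header: embedded, encrypted, inside the block itself
            let _ = bytes.len();
        }
        Some(_) => unreachable!("CommitHeaderObject::None is filtered out by header_ref()"),
    }
}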

@@ -24,17 +24,17 @@ impl BranchV0 {
    pub fn new(
        id: PubKey,
        repo: ObjectRef,
-        root_branch_def_id: ObjectId,
+        root_branch_readcap_id: ObjectId,
        topic_priv: PrivKey,
        metadata: Vec<u8>,
    ) -> BranchV0 {
        let topic_privkey: Vec<u8> = vec![];
-        //TODO: topic_privkey is topic_priv encrypted with the repo_secret, branch_id, topic_id
+        //TODO: topic_privkey is topic_priv encrypted with RepoWriteCapSecret, TopicId, BranchId
        let topic = topic_priv.to_pub();
        BranchV0 {
            id,
            repo,
-            root_branch_def_id,
+            root_branch_readcap_id,
            topic,
            topic_privkey,
            metadata,
@@ -46,14 +46,14 @@ impl Branch {
    pub fn new(
        id: PubKey,
        repo: ObjectRef,
-        root_branch_def_id: ObjectId,
+        root_branch_readcap_id: ObjectId,
        topic_priv: PrivKey,
        metadata: Vec<u8>,
    ) -> Branch {
        Branch::V0(BranchV0::new(
            id,
            repo,
-            root_branch_def_id,
+            root_branch_readcap_id,
            topic_priv,
            metadata,
        ))
@@ -81,8 +81,8 @@ impl Branch {
        visited: &mut HashSet<ObjectId>,
        missing: &mut HashSet<ObjectId>,
    ) -> Result<bool, ObjectParseError> {
-        //log_debug!(">>> load_branch: {}", cobj.id());
        let id = cobj.id();
+        //log_debug!(">>> load_branch: {}", id);

        // root has no acks
        let is_root = cobj.is_root();
@@ -180,17 +180,23 @@ mod test {
    pub fn test_branch() {
        fn add_obj(
            content: ObjectContentV0,
-            header: Option<CommitHeaderV0>,
-            repo_pubkey: PubKey,
-            repo_secret: SymKey,
+            header: Option<CommitHeader>,
+            store_pubkey: &StoreRepo,
+            store_secret: &ReadCapSecret,
            store: &Box<impl RepoStore + ?Sized>,
        ) -> ObjectRef {
            let max_object_size = 4000;
-            let obj = Object::new(content, header, max_object_size, repo_pubkey, repo_secret);
+            let mut obj = Object::new(
+                ObjectContent::V0(content),
+                header,
+                max_object_size,
+                store_pubkey,
+                store_secret,
+            );
            log_debug!(">>> add_obj");
            log_debug!("  id: {:?}", obj.id());
            log_debug!("  header: {:?}", obj.header());
-            obj.save(store).unwrap();
+            obj.save_in_test(store).unwrap();
            obj.reference().unwrap()
        }
@@ -202,11 +208,11 @@ mod test {
            deps: Vec<ObjectRef>,
            acks: Vec<ObjectRef>,
            body_ref: ObjectRef,
-            repo_pubkey: PubKey,
-            repo_secret: SymKey,
+            store_pubkey: &StoreRepo,
+            store_secret: &ReadCapSecret,
            store: &Box<impl RepoStore + ?Sized>,
        ) -> ObjectRef {
-            let header = CommitHeaderV0::new_with_deps_and_acks(
+            let header = CommitHeader::new_with_deps_and_acks(
                deps.iter().map(|r| r.id).collect(),
                acks.iter().map(|r| r.id).collect(),
            );
@@ -236,45 +242,45 @@ mod test {
            .unwrap();
            //log_debug!("commit: {:?}", commit);
            add_obj(
-                ObjectContentV0::Commit(commit),
+                ObjectContentV0::Commit(Commit::V0(commit)),
                header,
-                repo_pubkey,
-                repo_secret,
+                store_pubkey,
+                store_secret,
                store,
            )
        }

        fn add_body_branch(
            branch: BranchV0,
-            repo_pubkey: PubKey,
-            repo_secret: SymKey,
+            store_pubkey: &StoreRepo,
+            store_secret: &ReadCapSecret,
            store: &Box<impl RepoStore + ?Sized>,
        ) -> ObjectRef {
-            let body = CommitBodyV0::Branch(branch);
+            let body: CommitBodyV0 = CommitBodyV0::Branch(Branch::V0(branch));
            //log_debug!("body: {:?}", body);
            add_obj(
-                ObjectContentV0::CommitBody(body),
+                ObjectContentV0::CommitBody(CommitBody::V0(body)),
                None,
-                repo_pubkey,
-                repo_secret,
+                store_pubkey,
+                store_secret,
                store,
            )
        }

        fn add_body_trans(
-            header: Option<CommitHeaderV0>,
-            repo_pubkey: PubKey,
-            repo_secret: SymKey,
+            header: Option<CommitHeader>,
+            store_pubkey: &StoreRepo,
+            store_secret: &ReadCapSecret,
            store: &Box<impl RepoStore + ?Sized>,
        ) -> ObjectRef {
            let content = [7u8; 777].to_vec();
-            let body = CommitBodyV0::Transaction(content);
+            let body = CommitBodyV0::AsyncTransaction(Transaction::V0(content));
            //log_debug!("body: {:?}", body);
            add_obj(
-                ObjectContentV0::CommitBody(body),
+                ObjectContentV0::CommitBody(CommitBody::V0(body)),
                header,
-                repo_pubkey,
-                repo_secret,
+                store_pubkey,
+                store_secret,
                store,
            )
        }
@@ -298,6 +304,7 @@ mod test {
        let repo_privkey = PrivKey::Ed25519PrivKey(repo_keypair.secret.to_bytes());
        let repo_pubkey = PubKey::Ed25519PubKey(repo_keypair.public.to_bytes());
        let repo_secret = SymKey::ChaCha20Key([9; 32]);
+        let store_repo = StoreRepo::V0(StoreRepoV0::PublicStore(repo_pubkey));

        // branch
@@ -314,8 +321,8 @@ mod test {
        let repo = Repo::new_with_member(
            &repo_pubkey,
-            member_pubkey,
-            &[Permission::Transaction],
+            &member_pubkey,
+            &[PermissionV0::WriteAsync],
            store,
        );
@@ -352,14 +359,10 @@ mod test {
        // commit bodies

-        let branch_body = add_body_branch(
-            branch.clone(),
-            repo_pubkey.clone(),
-            repo_secret.clone(),
-            repo.get_store(),
-        );
+        let branch_body =
+            add_body_branch(branch.clone(), &store_repo, &repo_secret, repo.get_store());

-        let trans_body = add_body_trans(None, repo_pubkey, repo_secret.clone(), repo.get_store());
+        let trans_body = add_body_trans(None, &store_repo, &repo_secret, repo.get_store());

        // create & add commits to store
@@ -372,8 +375,8 @@ mod test {
            vec![],
            vec![],
            branch_body.clone(),
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &repo_secret,
            repo.get_store(),
        );
@@ -386,8 +389,8 @@ mod test {
            vec![br.clone()],
            vec![],
            trans_body.clone(),
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &repo_secret,
            repo.get_store(),
        );
@@ -400,8 +403,8 @@ mod test {
            vec![br.clone()],
            vec![],
            trans_body.clone(),
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &repo_secret,
            repo.get_store(),
        );
@@ -428,8 +431,8 @@ mod test {
            vec![t2.clone()],
            vec![t1.clone()],
            trans_body.clone(),
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &repo_secret,
            repo.get_store(),
        );
@@ -442,8 +445,8 @@ mod test {
            vec![t1.clone(), t2.clone()],
            vec![t4.clone()],
            trans_body.clone(),
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &repo_secret,
            repo.get_store(),
        );
@@ -456,8 +459,8 @@ mod test {
            vec![t4.clone()],
            vec![],
            trans_body.clone(),
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &repo_secret,
            repo.get_store(),
        );
@@ -470,13 +473,13 @@ mod test {
            vec![t4.clone()],
            vec![],
            trans_body.clone(),
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &repo_secret,
            repo.get_store(),
        );

-        let c7 = Commit::load(a7.clone(), repo.get_store()).unwrap();
-        c7.verify(&repo, repo.get_store()).unwrap();
+        let mut c7 = Commit::load(a7.clone(), repo.get_store(), true).unwrap();
+        c7.verify(&repo).unwrap();

        let mut filter = Filter::new(FilterBuilder::new(10, 0.01));
        for commit_ref in [br, t1, t2, t5.clone(), a6.clone()] {

@@ -12,6 +12,7 @@
//! Commit

use ed25519_dalek::*;
+use once_cell::sync::OnceCell;

use crate::errors::NgError;
use crate::log::*;
@@ -26,9 +27,13 @@ use std::iter::FromIterator;
pub enum CommitLoadError {
    MissingBlocks(Vec<BlockId>),
    ObjectParseError,
-    DeserializeError,
+    NotACommitError,
+    NotACommitBodyError,
    CannotBeAtRootOfBranch,
    MustBeAtRootOfBranch,
+    BodyLoadError,
+    HeaderLoadError,
+    BodyTypeMismatch,
}

#[derive(Debug)]
@@ -56,9 +61,10 @@ impl CommitV0 {
        metadata: Vec<u8>,
        body: ObjectRef,
    ) -> Result<CommitV0, SignatureError> {
-        let headers = CommitHeaderV0::new_with(deps, ndeps, acks, nacks, refs, nrefs);
+        let headers = CommitHeader::new_with(deps, ndeps, acks, nacks, refs, nrefs);
        let content = CommitContentV0 {
-            author: author_pubkey,
+            perms: vec![],
+            author: (&author_pubkey).into(),
            seq,
            branch,
            header_keys: headers.1,
@@ -81,11 +87,12 @@ impl CommitV0 {
        ss[1].copy_from_slice(it.next().unwrap());
        let sig = Sig::Ed25519Sig(ss);
        Ok(CommitV0 {
-            content,
+            content: CommitContent::V0(content),
            sig,
            id: None,
            key: None,
            header: headers.0,
+            body: OnceCell::new(),
        })
    }
}
@@ -125,10 +132,38 @@ impl Commit {
            .map(|c| Commit::V0(c))
    }
pub fn save(
&mut self,
block_size: usize,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl RepoStore + ?Sized>,
) -> Result<ObjectRef, StorageError> {
match self {
Commit::V0(v0) => {
let mut obj = Object::new(
ObjectContent::V0(ObjectContentV0::Commit(Commit::V0(v0.clone()))),
v0.header.clone(),
block_size,
store_pubkey,
store_secret,
);
obj.save(store)?;
if let Some(h) = &mut v0.header {
h.set_id(obj.header().as_ref().unwrap().id().unwrap());
}
self.set_id(obj.get_and_save_id());
self.set_key(obj.key().unwrap());
Ok(obj.reference().unwrap())
}
}
}
    /// Load commit from store
    pub fn load(
        commit_ref: ObjectRef,
        store: &Box<impl RepoStore + ?Sized>,
+        with_body: bool,
    ) -> Result<Commit, CommitLoadError> {
        let (id, key) = (commit_ref.id, commit_ref.key);
        match Object::load(id, Some(key.clone()), store) {
@@ -138,14 +173,17 @@ impl Commit {
                .map_err(|_e| CommitLoadError::ObjectParseError)?;
            let mut commit = match content {
                ObjectContent::V0(ObjectContentV0::Commit(c)) => c,
-                _ => return Err(CommitLoadError::DeserializeError),
+                _ => return Err(CommitLoadError::NotACommitError),
            };
-            commit.id = Some(id);
-            commit.key = Some(key.clone());
-            if let Some(CommitHeader::V0(header_v0)) = obj.header() {
-                commit.header = Some(header_v0.clone());
-            }
-            Ok(Commit::V0(commit))
+            commit.set_id(id);
+            commit.set_key(key.clone());
+            commit.set_header(obj.header().clone());
+            if with_body {
+                commit.load_body(store)?;
+            }
+            Ok(commit)
        }
        Err(ObjectParseError::MissingBlocks(missing)) => {
            Err(CommitLoadError::MissingBlocks(missing))
@@ -158,8 +196,10 @@ impl Commit {
    pub fn load_body(
        &self,
        store: &Box<impl RepoStore + ?Sized>,
-    ) -> Result<CommitBody, CommitLoadError> {
-        // TODO store body in CommitV0 (with #[serde(skip)]) as a cache for subsequent calls to load_body
+    ) -> Result<&CommitBody, CommitLoadError> {
+        if self.body().is_some() {
+            return Ok(self.body().unwrap());
+        }
        let content = self.content_v0();
        let (id, key) = (content.body.id, content.body.key.clone());
        let obj = Object::load(id.clone(), Some(key.clone()), store).map_err(|e| match e {
@@ -170,39 +210,69 @@ impl Commit {
            .content()
            .map_err(|_e| CommitLoadError::ObjectParseError)?;
        match content {
-            ObjectContent::V0(ObjectContentV0::CommitBody(body)) => Ok(CommitBody::V0(body)),
-            _ => Err(CommitLoadError::DeserializeError),
+            ObjectContent::V0(ObjectContentV0::CommitBody(body)) => {
+                self.set_body(body);
+                Ok(self.body().unwrap())
+            }
+            _ => Err(CommitLoadError::NotACommitBodyError),
        }
    }
-    /// Get ID of parent `Object`
+    fn set_body(&self, body: CommitBody) {
match self {
Commit::V0(c) => {
c.body.set(body).unwrap();
}
}
}
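The body cache relies on the write-once cell from the `once_cell` crate added above: `load_body` can memoize through `&self` without needing `&mut self`. A minimal, self-contained sketch of that pattern (the `Cached` type is illustrative, not from the commit):

use once_cell::sync::OnceCell;

struct Cached {
    value: OnceCell<String>,
}

impl Cached {
    // takes &self, yet fills the cell exactly once on first use
    fn value(&self) -> &String {
        self.value.get_or_init(|| "expensive load happens once".to_string())
    }
}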
/// Get ID of including `Object`,
/// only available if the Commit was loaded from store or saved
    pub fn id(&self) -> Option<ObjectId> {
        match self {
            Commit::V0(c) => c.id,
        }
    }
-    /// Set ID of parent `Object`
-    pub fn set_id(&mut self, id: ObjectId) {
+    /// Get ID of header `Object`
+    pub fn header_id(&self) -> &Option<ObjectId> {
match self {
Commit::V0(CommitV0 {
header: Some(ch), ..
}) => ch.id(),
_ => &None,
}
}
/// Set ID of including `Object`
fn set_id(&mut self, id: ObjectId) {
        match self {
            Commit::V0(c) => c.id = Some(id),
        }
    }
-    /// Get key of parent `Object`
+    /// Get key of including `Object`,
+    /// only available if the Commit was loaded from store or saved
    pub fn key(&self) -> Option<SymKey> {
        match self {
            Commit::V0(c) => c.key.clone(),
        }
    }

-    /// Set key of parent `Object`
-    pub fn set_key(&mut self, key: SymKey) {
+    /// Set key of including `Object`
+    fn set_key(&mut self, key: SymKey) {
        match self {
            Commit::V0(c) => c.key = Some(key),
        }
    }
/// Set header of including `Object`
fn set_header(&mut self, header: Option<CommitHeader>) {
match self {
Commit::V0(c) => c.header = header,
}
}
    /// Get commit signature
    pub fn sig(&self) -> &Sig {
        match self {
@@ -210,32 +280,90 @@ impl Commit {
        }
    }
    /// Get commit header
pub fn header(&self) -> &Option<CommitHeader> {
match self {
Commit::V0(c) => &c.header,
}
}
    /// Get commit content V0
    pub fn content_v0(&self) -> &CommitContentV0 {
        match self {
-            Commit::V0(c) => &c.content,
+            Commit::V0(CommitV0 {
+                content: CommitContent::V0(c),
+                ..
+            }) => c,
        }
    }
pub fn body(&self) -> Option<&CommitBody> {
match self {
Commit::V0(c) => c.body.get(),
}
}
pub fn owners_signature_required(
&self,
store: &Box<impl RepoStore + ?Sized>,
) -> Result<bool, CommitLoadError> {
match self.load_body(store)? {
CommitBody::V0(CommitBodyV0::UpdateRootBranch(new_root)) => {
// load deps (the previous RootBranch commit)
let deps = self.deps();
if deps.len() != 1 {
Err(CommitLoadError::HeaderLoadError)
} else {
let previous_rootbranch_commit = Commit::load(deps[0].clone(), store, true)?;
let previous_rootbranch = previous_rootbranch_commit
.body()
.unwrap()
.root_branch_commit()?;
if previous_rootbranch.owners() != new_root.owners() {
Ok(true)
} else {
Ok(false)
}
}
}
CommitBody::V0(CommitBodyV0::RootBranch(_)) => {
let deps = self.deps();
let acks = self.acks();
if deps.len() == 0 && acks.len() == 1 {
// we check that the ACK is the repository singleton commit. in this case, it means we are dealing with the first RootBranch commit, which is fine to have no deps.
let causal_past = Commit::load(acks[0].clone(), store, true)?;
if causal_past.body().unwrap().is_repository_singleton_commit() {
return Ok(false);
}
}
Err(CommitLoadError::HeaderLoadError)
}
_ => Ok(false),
} }
} }
    /// This commit is the first one in the branch (doesn't have any ACKs nor Nacks)
    pub fn is_root_commit_of_branch(&self) -> bool {
        match self {
-            Commit::V0(c) => match &c.content.header_keys {
-                Some(hk) => hk.acks.is_empty() && hk.nacks.is_empty(),
+            Commit::V0(CommitV0 {
+                content: CommitContent::V0(c),
+                ..
+            }) => match &c.header_keys {
+                Some(CommitHeaderKeys::V0(hk)) => hk.acks.is_empty() && hk.nacks.is_empty(),
                None => true,
            },
            _ => unimplemented!(),
        }
    }
-    /// Get acks
+    /// Get acks (that have both an ID in the header and a key in the header_keys)
    pub fn acks(&self) -> Vec<ObjectRef> {
        let mut res: Vec<ObjectRef> = vec![];
        match self {
            Commit::V0(c) => match &c.header {
-                Some(header_v0) => match &c.content.header_keys {
-                    Some(hk) => {
-                        for ack in header_v0.acks.iter().zip(hk.acks.iter()) {
+                Some(CommitHeader::V0(header_v0)) => match &c.content.header_keys() {
+                    Some(CommitHeaderKeys::V0(hk_v0)) => {
+                        for ack in header_v0.acks.iter().zip(hk_v0.acks.iter()) {
                            res.push(ack.into());
                        }
                    }
@@ -248,14 +376,14 @@ impl Commit {
        res
    }
-    /// Get deps
+    /// Get deps (that have both an ID in the header and a key in the header_keys)
    pub fn deps(&self) -> Vec<ObjectRef> {
        let mut res: Vec<ObjectRef> = vec![];
        match self {
            Commit::V0(c) => match &c.header {
-                Some(header_v0) => match &c.content.header_keys {
-                    Some(hk) => {
-                        for dep in header_v0.deps.iter().zip(hk.deps.iter()) {
+                Some(CommitHeader::V0(header_v0)) => match &c.content.header_keys() {
+                    Some(CommitHeaderKeys::V0(hk_v0)) => {
+                        for dep in header_v0.deps.iter().zip(hk_v0.deps.iter()) {
                            res.push(dep.into());
                        }
                    }
@@ -268,23 +396,22 @@ impl Commit {
        res
    }
-    /// Get all commits that are in the direct causal past of the commit (`deps`, `acks`, `nacks`, `ndeps`)
+    /// Get all commits that are in the direct causal past of the commit (`deps`, `acks`, `nacks`);
+    /// only returns ObjectRefs that have both an ID from the header and a KEY from the header_keys (it couldn't be otherwise)
    pub fn direct_causal_past(&self) -> Vec<ObjectRef> {
        let mut res: Vec<ObjectRef> = vec![];
        match self {
-            Commit::V0(c) => match (&c.header, &c.content.header_keys) {
-                (Some(header_v0), Some(hk)) => {
-                    for ack in header_v0.acks.iter().zip(hk.acks.iter()) {
+            Commit::V0(c) => match (&c.header, &c.content.header_keys()) {
+                (Some(CommitHeader::V0(header_v0)), Some(CommitHeaderKeys::V0(hk_v0))) => {
+                    for ack in header_v0.acks.iter().zip(hk_v0.acks.iter()) {
                        res.push(ack.into());
                    }
-                    for nack in header_v0.nacks.iter().zip(hk.nacks.iter()) {
+                    for nack in header_v0.nacks.iter().zip(hk_v0.nacks.iter()) {
                        res.push(nack.into());
                    }
-                    for dep in header_v0.deps.iter().zip(hk.deps.iter()) {
+                    for dep in header_v0.deps.iter().zip(hk_v0.deps.iter()) {
                        res.push(dep.into());
-                    }
-                    for ndep in header_v0.ndeps.iter().zip(hk.ndeps.iter()) {
-                        res.push(ndep.into());
-                    }
+                    } //TODO: deal with deps that are also in acks; they shouldn't be added twice
                }
                _ => {}
@@ -297,7 +424,10 @@ impl Commit {
    /// Get seq
    pub fn seq(&self) -> u64 {
        match self {
-            Commit::V0(c) => c.content.seq,
+            Commit::V0(CommitV0 {
+                content: CommitContent::V0(c),
+                ..
+            }) => c.seq,
        }
    }
@@ -307,16 +437,18 @@ impl Commit {
            Commit::V0(c) => c,
        };
        let content_ser = serde_bare::to_vec(&c.content).unwrap();
-        let pubkey = match c.content.author {
-            PubKey::Ed25519PubKey(pk) => pk,
-            _ => panic!("author cannot have a Montgomery key"),
-        };
-        let pk = PublicKey::from_bytes(&pubkey)?;
-        let sig_bytes = match c.sig {
-            Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(),
-        };
-        let sig = Signature::from_bytes(&sig_bytes)?;
-        pk.verify_strict(&content_ser, &sig)
+        unimplemented!();
+        // FIXME: lookup author in member's list
+        // let pubkey = match c.content.author() {
+        //     PubKey::Ed25519PubKey(pk) => pk,
+        //     _ => panic!("author cannot have a Montgomery key"),
+        // };
+        // let pk = PublicKey::from_bytes(pubkey)?;
+        // let sig_bytes = match c.sig {
+        //     Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(),
+        // };
+        // let sig = Signature::from_bytes(&sig_bytes)?;
+        // pk.verify_strict(&content_ser, &sig)
    }
    /// Verify commit permissions
@@ -325,7 +457,9 @@ impl Commit {
            .map_err(|_| CommitVerifyError::PermissionDenied)
    }
-    /// Verify if the commit's `body`, `header` and direct_causal_past, and recursively all their refs are available in the `store`
+    /// Verify that the commit's `body` and its direct_causal_past, and recursively all their refs, are available in the `store`;
+    /// returns a list of all the ObjectIds that have been visited (only commits in the DAG)
+    /// or a list of missing blocks
    pub fn verify_full_object_refs_of_branch_at_commit(
        &self,
        store: &Box<impl RepoStore + ?Sized>,
@@ -342,49 +476,53 @@ impl Commit {
    ) -> Result<(), CommitLoadError> {
        //log_debug!(">>> load_branch: #{}", commit.seq());

-        // FIXME: what about this comment? seems like a Commit always has an id
+        // the self of verify_full_object_refs_of_branch_at_commit() may not have an ID set yet (if it hasn't been saved),
+        // but the commits loaded from the store should all have one
        match commit.id() {
            Some(id) => {
                if visited.contains(&id) {
                    return Ok(());
                }
                visited.insert(id);
+                // not adding the ObjectId of the header of this commit, as it is not part of the DAG (neither is the CommitBody added to visited)
+                // commit.header_id().map(|hid| visited.insert(hid));
            }
-            None => panic!("Commit without an ID"),
+            None => {
+                if !visited.is_empty() {
+                    // we are not at the beginning (i.e., at the self/the commit object), so this is a panic error, as all causal
+                    // past commits have been loaded from the store and should have an ID
+                    panic!("A Commit in the causal past doesn't have an ID");
+                }
+            }
        }
        // load body & check if it's the Branch root commit
        match commit.load_body(store) {
-            Ok(body) => {
-                if commit.is_root_commit_of_branch() {
-                    if body.must_be_root_commit_in_branch() {
-                        Ok(())
-                    } else {
-                        Err(CommitLoadError::CannotBeAtRootOfBranch)
-                    }
-                } else {
-                    if body.must_be_root_commit_in_branch() {
-                        Err(CommitLoadError::MustBeAtRootOfBranch)
-                    } else {
-                        Ok(())
-                    }
-                }
-            }
+            Ok(_) => Ok(()),
            Err(CommitLoadError::MissingBlocks(m)) => {
                // The commit body is missing.
                missing.extend(m);
-                Ok(())
+                Err(CommitLoadError::BodyLoadError)
            }
            Err(e) => Err(e),
        }?;
let body = commit.body().unwrap();
visited.insert(commit.content_v0().body.id);
if commit.is_root_commit_of_branch() {
if !body.must_be_root_commit_in_branch() {
return Err(CommitLoadError::CannotBeAtRootOfBranch);
}
} else {
if body.must_be_root_commit_in_branch() {
return Err(CommitLoadError::MustBeAtRootOfBranch);
}
}
        // load direct causal past
        for blockref in commit.direct_causal_past() {
-            match Commit::load(blockref, store) {
-                Ok(c) => {
-                    load_direct_object_refs(&c, store, visited, missing)?;
+            match Commit::load(blockref, store, true) {
+                Ok(mut c) => {
+                    load_direct_object_refs(&mut c, store, visited, missing)?;
                }
                Err(CommitLoadError::MissingBlocks(m)) => {
                    missing.extend(m);
@@ -407,11 +545,7 @@ impl Commit {
        }
    }

    /// Verify signature, permissions, and full causal past
-    pub fn verify(
-        &self,
-        repo: &Repo,
-        store: &Box<impl RepoStore + ?Sized>,
-    ) -> Result<(), CommitVerifyError> {
+    pub fn verify(&self, repo: &Repo) -> Result<(), CommitVerifyError> {
        self.verify_sig()
            .map_err(|_e| CommitVerifyError::InvalidSignature)?;
        self.verify_perm(repo)?;
@@ -421,6 +555,218 @@ impl Commit {
        }
    }
impl PermissionV0 {
/// the kind of permissions that can be added and removed with AddWritePermission and RemoveWritePermission permissions respectively
pub fn is_write_permission(&self) -> bool {
match self {
Self::WriteAsync | Self::WriteSync | Self::RefreshWriteCap => true,
_ => false,
}
}
pub fn is_delegated_by_admin(&self) -> bool {
self.is_write_permission()
|| match self {
Self::AddReadMember
| Self::RemoveMember
| Self::AddWritePermission
| Self::RemoveWritePermission
| Self::Compact
| Self::AddBranch
| Self::RemoveBranch
| Self::ChangeName
| Self::RefreshReadCap => true,
_ => false,
}
}
pub fn is_delegated_by_owner(&self) -> bool {
self.is_delegated_by_admin()
|| match self {
Self::ChangeQuorum | Self::Admin | Self::ChangeMainBranch => true,
_ => false,
}
}
}
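A hedged illustration of the delegation lattice encoded by the three predicates above (the assertions follow directly from the match arms; nothing here is new API):

// write permissions are delegable by admins...
assert!(PermissionV0::WriteAsync.is_write_permission());
assert!(PermissionV0::WriteAsync.is_delegated_by_admin());
// ...but granting Admin itself is reserved to owners
assert!(PermissionV0::Admin.is_delegated_by_owner());
assert!(!PermissionV0::Admin.is_delegated_by_admin());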
impl CommitBody {
pub fn root_branch_commit(&self) -> Result<&RootBranch, CommitLoadError> {
match self {
Self::V0(v0) => match v0 {
CommitBodyV0::UpdateRootBranch(rb) | CommitBodyV0::RootBranch(rb) => Ok(rb),
_ => Err(CommitLoadError::BodyTypeMismatch),
},
}
}
pub fn is_repository_singleton_commit(&self) -> bool {
match self {
Self::V0(v0) => match v0 {
CommitBodyV0::Repository(_) => true,
_ => false,
},
}
}
pub fn must_be_root_commit_in_branch(&self) -> bool {
match self {
Self::V0(v0) => match v0 {
CommitBodyV0::Repository(_) => true,
CommitBodyV0::Branch(_) => true,
_ => false,
},
}
}
pub fn on_root_branch(&self) -> bool {
match self {
Self::V0(v0) => match v0 {
CommitBodyV0::Repository(_) => true,
CommitBodyV0::RootBranch(_) => true,
CommitBodyV0::UpdateRootBranch(_) => true,
CommitBodyV0::ChangeMainBranch(_) => true,
CommitBodyV0::AddBranch(_) => true,
CommitBodyV0::RemoveBranch(_) => true,
CommitBodyV0::AddMember(_) => true,
CommitBodyV0::RemoveMember(_) => true,
CommitBodyV0::AddPermission(_) => true,
CommitBodyV0::RemovePermission(_) => true,
CommitBodyV0::AddName(_) => true,
CommitBodyV0::RemoveName(_) => true,
//CommitBodyV0::Quorum(_) => true,
CommitBodyV0::RefreshReadCap(_) => true,
CommitBodyV0::RefreshWriteCap(_) => true,
CommitBodyV0::SyncSignature(_) => true,
_ => false,
},
}
}
pub fn on_transactional_branch(&self) -> bool {
match self {
Self::V0(v0) => match v0 {
CommitBodyV0::Branch(_) => true,
CommitBodyV0::UpdateBranch(_) => true,
CommitBodyV0::Snapshot(_) => true,
CommitBodyV0::AsyncTransaction(_) => true,
CommitBodyV0::SyncTransaction(_) => true,
CommitBodyV0::AddFile(_) => true,
CommitBodyV0::RemoveFile(_) => true,
CommitBodyV0::Compact(_) => true,
CommitBodyV0::AsyncSignature(_) => true,
CommitBodyV0::RefreshReadCap(_) => true,
CommitBodyV0::RefreshWriteCap(_) => true,
CommitBodyV0::SyncSignature(_) => true,
_ => false,
},
}
}
pub fn total_order_required(&self) -> bool {
match self {
Self::V0(v0) => match v0 {
CommitBodyV0::UpdateRootBranch(_) => true,
CommitBodyV0::UpdateBranch(_) => true,
CommitBodyV0::ChangeMainBranch(_) => true,
CommitBodyV0::AddBranch(_) => true,
CommitBodyV0::RemoveBranch(_) => true,
CommitBodyV0::AddMember(_) => true,
CommitBodyV0::RemoveMember(_) => true,
CommitBodyV0::RemovePermission(_) => true,
//CommitBodyV0::Quorum(_) => true,
CommitBodyV0::Compact(_) => true,
CommitBodyV0::SyncTransaction(_) => true, // check Quorum::TotalOrder in CommitContent
CommitBodyV0::RefreshReadCap(_) => true,
CommitBodyV0::RefreshWriteCap(_) => true,
_ => false,
},
}
}
pub fn required_permission(&self) -> HashSet<PermissionV0> {
let res: Vec<PermissionV0>;
res = match self {
Self::V0(v0) => match v0 {
CommitBodyV0::Repository(_) => vec![PermissionV0::Create],
CommitBodyV0::RootBranch(_) => vec![PermissionV0::Create],
CommitBodyV0::UpdateRootBranch(_) => vec![
PermissionV0::ChangeQuorum,
PermissionV0::RefreshWriteCap,
PermissionV0::RefreshReadCap,
PermissionV0::RefreshOverlay,
],
CommitBodyV0::AddMember(_) => {
vec![PermissionV0::Create, PermissionV0::AddReadMember]
}
CommitBodyV0::RemoveMember(_) => vec![PermissionV0::RemoveMember],
CommitBodyV0::AddPermission(addp) => {
let mut perms = vec![PermissionV0::Create];
if addp.permission_v0().is_delegated_by_admin() {
perms.push(PermissionV0::Admin);
}
if addp.permission_v0().is_write_permission() {
perms.push(PermissionV0::AddWritePermission);
}
perms
}
CommitBodyV0::RemovePermission(remp) => {
let mut perms = vec![];
if remp.permission_v0().is_delegated_by_admin() {
perms.push(PermissionV0::Admin);
}
if remp.permission_v0().is_write_permission() {
perms.push(PermissionV0::RemoveWritePermission);
}
perms
}
CommitBodyV0::AddBranch(_) => vec![
PermissionV0::Create,
PermissionV0::AddBranch,
PermissionV0::RefreshReadCap,
PermissionV0::RefreshWriteCap,
PermissionV0::RefreshOverlay,
],
CommitBodyV0::RemoveBranch(_) => vec![PermissionV0::RemoveBranch],
CommitBodyV0::UpdateBranch(_) => {
vec![PermissionV0::RefreshReadCap, PermissionV0::RefreshWriteCap]
}
CommitBodyV0::AddName(_) => vec![PermissionV0::AddBranch, PermissionV0::ChangeName],
CommitBodyV0::RemoveName(_) => {
vec![PermissionV0::ChangeName, PermissionV0::RemoveBranch]
}
CommitBodyV0::Branch(_) => vec![PermissionV0::Create, PermissionV0::AddBranch],
CommitBodyV0::ChangeMainBranch(_) => {
vec![PermissionV0::Create, PermissionV0::ChangeMainBranch]
}
CommitBodyV0::Snapshot(_) => vec![PermissionV0::WriteAsync],
CommitBodyV0::Compact(_) => vec![PermissionV0::Compact],
CommitBodyV0::AsyncTransaction(_) => vec![PermissionV0::WriteAsync],
CommitBodyV0::AddFile(_) => vec![PermissionV0::WriteAsync, PermissionV0::WriteSync],
CommitBodyV0::RemoveFile(_) => {
vec![PermissionV0::WriteAsync, PermissionV0::WriteSync]
}
CommitBodyV0::SyncTransaction(_) => vec![PermissionV0::WriteSync],
CommitBodyV0::AsyncSignature(_) => vec![PermissionV0::WriteAsync],
CommitBodyV0::SyncSignature(_) => vec![
PermissionV0::WriteSync,
PermissionV0::ChangeQuorum,
PermissionV0::RefreshWriteCap,
PermissionV0::RefreshReadCap,
PermissionV0::RefreshOverlay,
PermissionV0::ChangeMainBranch,
PermissionV0::AddBranch,
PermissionV0::RemoveBranch,
PermissionV0::AddReadMember,
PermissionV0::RemoveMember,
PermissionV0::RemoveWritePermission,
PermissionV0::Compact,
],
CommitBodyV0::RefreshReadCap(_) => vec![PermissionV0::RefreshReadCap],
CommitBodyV0::RefreshWriteCap(_) => vec![PermissionV0::RefreshWriteCap],
},
};
HashSet::from_iter(res.iter().cloned())
}
}
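A hedged sketch of how the two halves fit together during verification: the body names the permissions it needs, and the author's UserInfo (see the p2p-repo/src/repo.rs change below) is checked against that set. The `check_author` name is illustrative, not part of the commit:

fn check_author(author: &UserInfo, body: &CommitBody) -> Result<(), NgError> {
    // required_permission() returns a HashSet<PermissionV0>; any one grant suffices
    author.has_any_perm(&body.required_permission())
}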
mod test {
    use std::collections::HashMap;
@@ -483,8 +829,7 @@ mod test {
        let store = Box::new(HashMapRepoStore::new());

-        let repo =
-            Repo::new_with_member(&pub_key, pub_key.clone(), &[Permission::Transaction], store);
+        let repo = Repo::new_with_member(&pub_key, &pub_key, &[PermissionV0::WriteAsync], store);

        //let body = CommitBody::Ack(Ack::V0());
        //log_debug!("body: {:?}", body);
@@ -511,7 +856,7 @@ mod test {
            Err(e) => panic!("Commit verify error: {:?}", e),
        }

-        match commit.verify(&repo, repo.get_store()) {
+        match commit.verify(&repo) {
            Ok(_) => panic!("Commit should not be Ok"),
            Err(CommitVerifyError::BodyLoadError(CommitLoadError::MissingBlocks(missing))) => {
                assert_eq!(missing.len(), 1);

@@ -25,6 +25,8 @@ use crate::types::*;
/// Size of a serialized empty Block
const EMPTY_BLOCK_SIZE: usize = 12 + 1;
+/// Max size of an embedded CommitHeader
+const MAX_EMBEDDED_COMMIT_HEADER_SIZE: usize = 100;
/// Size of a serialized BlockId
const BLOCK_ID_SIZE: usize = 33;
/// Size of serialized SymKey
@@ -49,6 +51,12 @@ pub struct Object {
    /// Header
    header: Option<CommitHeader>,

+    /// Blocks of the Header (nodes of the tree)
+    header_blocks: Vec<Block>,
+
+    #[cfg(test)]
+    already_saved: bool,
}
/// Object parsing errors
@@ -80,8 +88,11 @@ pub enum ObjectCopyError {
}
impl Object {
-    fn convergence_key(repo_pubkey: PubKey, repo_secret: SymKey) -> [u8; blake3::OUT_LEN] {
-        let key_material = match (repo_pubkey, repo_secret) {
+    fn convergence_key(
+        store_pubkey: &StoreRepo,
+        store_readcap_secret: &ReadCapSecret,
+    ) -> [u8; blake3::OUT_LEN] {
+        let key_material = match (*store_pubkey.repo_id(), store_readcap_secret.clone()) {
            (PubKey::Ed25519PubKey(pubkey), SymKey::ChaCha20Key(secret)) => {
                [pubkey, secret].concat()
            }
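A hedged, self-contained sketch of the convergent-encryption keying shape used here: identical content under the same store pubkey and read-capability secret encrypts identically, enabling deduplication. Only the concat-then-derive shape comes from the code above; the context string and function name are assumptions for illustration:

fn convergence_key_sketch(store_pubkey: [u8; 32], readcap_secret: [u8; 32]) -> [u8; blake3::OUT_LEN] {
    // same key material => same convergence key => identical blocks get identical ciphertext
    let key_material = [store_pubkey, readcap_secret].concat();
    blake3::derive_key("illustrative context string", key_material.as_slice())
}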
@@ -94,7 +105,7 @@ impl Object {
        content: &[u8],
        conv_key: &[u8; blake3::OUT_LEN],
        children: Vec<ObjectId>,
-        header_ref: Option<ObjectRef>,
+        header_ref: Option<CommitHeaderRef>,
    ) -> Block {
        let key_hash = blake3::keyed_hash(conv_key, content);
        let nonce = [0u8; 12];
@@ -114,28 +125,39 @@ impl Object {
    fn make_header_v0(
        header: CommitHeaderV0,
        object_size: usize,
-        repo_pubkey: PubKey,
-        repo_secret: SymKey,
-    ) -> ObjectRef {
+        store: &StoreRepo,
+        store_secret: &ReadCapSecret,
+    ) -> (ObjectRef, Vec<Block>) {
        let header_obj = Object::new(
-            ObjectContentV0::CommitHeader(header),
+            ObjectContent::V0(ObjectContentV0::CommitHeader(CommitHeader::V0(header))),
            None,
            object_size,
-            repo_pubkey,
-            repo_secret,
+            store,
+            store_secret,
        );
        let header_ref = ObjectRef {
            id: header_obj.id(),
            key: header_obj.key().unwrap(),
        };
-        header_ref
+        (header_ref, header_obj.blocks)
+    }
+
+    fn make_header(
+        header: CommitHeader,
+        object_size: usize,
+        store: &StoreRepo,
+        store_secret: &ReadCapSecret,
+    ) -> (ObjectRef, Vec<Block>) {
+        match header {
+            CommitHeader::V0(v0) => Self::make_header_v0(v0, object_size, store, store_secret),
+        }
    }
    /// Build tree from leaves, returns parent nodes
    fn make_tree(
        leaves: &[Block],
        conv_key: &ChaCha20Key,
-        header_ref: &Option<ObjectRef>,
+        mut header_ref: Option<CommitHeaderRef>,
        arity: usize,
    ) -> Vec<Block> {
        let mut parents = vec![];
@@ -146,17 +168,17 @@ impl Object {
            let children = nodes.iter().map(|block| block.id()).collect();
            let content = ChunkContentV0::InternalNode(keys);
            let content_ser = serde_bare::to_vec(&content).unwrap();
-            let child_header = None;
            let header = if parents.is_empty() && it.peek().is_none() {
-                header_ref
+                header_ref.take()
            } else {
-                &child_header
+                None
            };
            parents.push(Self::make_block(
                content_ser.as_slice(),
                conv_key,
                children,
-                header.clone(),
+                header,
            ));
        }
        //log_debug!("parents += {}", parents.len());
@@ -178,14 +200,14 @@ impl Object {
    /// * `content`: Object content
    /// * `header`: CommitHeaderV0 : All references of the object
    /// * `block_size`: Desired block size for chunking content, rounded up to nearest valid block size
-    /// * `repo_pubkey`: Repository public key
-    /// * `repo_secret`: Repository secret
+    /// * `store`: store public key
+    /// * `store_secret`: store's read capability secret
    pub fn new(
-        content: ObjectContentV0,
-        header: Option<CommitHeaderV0>,
+        content: ObjectContent,
+        mut header: Option<CommitHeader>,
        block_size: usize,
-        repo_pubkey: PubKey,
-        repo_secret: SymKey,
+        store: &StoreRepo,
+        store_secret: &ReadCapSecret,
    ) -> Object {
        // create blocks by chunking + encrypting content
        let valid_block_size = store_valid_value_size(block_size);
@@ -193,16 +215,42 @@ impl Object {
        let data_chunk_size = valid_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA;

        let mut blocks: Vec<Block> = vec![];
-        let conv_key = Self::convergence_key(repo_pubkey, repo_secret.clone());
+        let conv_key = Self::convergence_key(store, store_secret);

-        let header_ref = header
-            .clone()
-            .map(|ch| Self::make_header_v0(ch, valid_block_size, repo_pubkey, repo_secret.clone()));
+        let (header_ref, header_blocks) = match &header {
+            None => (None, vec![]),
+            Some(h) => {
+                let res = Self::make_header(h.clone(), valid_block_size, store, store_secret);
+                if res.1.len() == 1
+                    && res.1[0].encrypted_content().len() < MAX_EMBEDDED_COMMIT_HEADER_SIZE
+                {
+                    (
+                        Some(CommitHeaderRef {
+                            obj: CommitHeaderObject::EncryptedContent(
+                                res.1[0].encrypted_content().to_vec(),
+                            ),
+                            key: res.0.key,
+                        }),
+                        vec![],
+                    )
+                } else {
+                    header.as_mut().unwrap().set_id(res.0.id);
+                    (
+                        Some(CommitHeaderRef {
+                            obj: CommitHeaderObject::Id(res.0.id),
+                            key: res.0.key,
+                        }),
+                        res.1,
+                    )
+                }
+            }
+        };

        let content_ser = serde_bare::to_vec(&content).unwrap();

        if EMPTY_BLOCK_SIZE
            + DATA_VARINT_EXTRA
+            + MAX_EMBEDDED_COMMIT_HEADER_SIZE
            + BLOCK_ID_SIZE * header_ref.as_ref().map_or(0, |_| 1)
            + content_ser.len()
            <= valid_block_size
@@ -234,13 +282,16 @@ impl Object {
            let arity: usize =
                (valid_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2 - MAX_HEADER_SIZE)
                    / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
-            let mut parents = Self::make_tree(blocks.as_slice(), &conv_key, &header_ref, arity);
+            let mut parents = Self::make_tree(blocks.as_slice(), &conv_key, header_ref, arity);
            blocks.append(&mut parents);
        }

        Object {
            blocks,
-            header: header.map(|h| CommitHeader::V0(h)),
+            header,
+            header_blocks,
+            #[cfg(test)]
+            already_saved: false,
        }
    }
@@ -292,25 +343,44 @@ impl Object {
        }
        let header = match root.header_ref() {
-            Some(header_ref) => {
-                let obj = Object::load(header_ref.id, Some(header_ref.key), store)?;
-                match obj.content()? {
-                    ObjectContent::V0(ObjectContentV0::CommitHeader(commit_header)) => {
-                        Some(CommitHeader::V0(commit_header))
-                    }
-                    _ => return Err(ObjectParseError::InvalidHeader),
-                }
-            }
-            None => None,
+            Some(header_ref) => match header_ref.obj {
+                CommitHeaderObject::None => panic!("shouldn't happen"),
+                CommitHeaderObject::Id(id) => {
+                    let obj = Object::load(id, Some(header_ref.key.clone()), store)?;
+                    match obj.content()? {
+                        ObjectContent::V0(ObjectContentV0::CommitHeader(mut commit_header)) => {
+                            commit_header.set_id(id);
+                            (Some(commit_header), Some(obj.blocks))
+                        }
+                        _ => return Err(ObjectParseError::InvalidHeader),
+                    }
+                }
+                CommitHeaderObject::EncryptedContent(content) => {
+                    match serde_bare::from_slice(content.as_slice()) {
+                        Ok(ObjectContent::V0(ObjectContentV0::CommitHeader(commit_header))) => {
+                            (Some(commit_header), None)
+                        }
+                        Err(_e) => return Err(ObjectParseError::InvalidHeader),
+                        _ => return Err(ObjectParseError::InvalidHeader),
+                    }
+                }
+            },
+            None => (None, None),
        };

-        Ok(Object { blocks, header })
+        Ok(Object {
+            blocks,
+            header: header.0,
+            header_blocks: header.1.unwrap_or(vec![]),
+            #[cfg(test)]
+            already_saved: true,
+        })
    }
-    /// Save blocks of the object in the store
+    /// Save the blocks of the object, and the blocks of the header object, in the store
    pub fn save(&self, store: &Box<impl RepoStore + ?Sized>) -> Result<(), StorageError> {
        let mut deduplicated: HashSet<ObjectId> = HashSet::new();
-        for block in &self.blocks {
+        for block in self.blocks.iter().chain(self.header_blocks.iter()) {
            let id = block.id();
            if deduplicated.get(&id).is_none() {
                store.put(block)?;
@@ -319,15 +389,30 @@ impl Object {
            }
        }
        Ok(())
    }
#[cfg(test)]
pub fn save_in_test(
&mut self,
store: &Box<impl RepoStore + ?Sized>,
) -> Result<(), StorageError> {
assert!(self.already_saved == false);
self.already_saved = true;
self.save(store)
}
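The chained iteration in save() is what lets header blocks ride along with content blocks in a single deduplicated pass. A minimal, self-contained sketch of the same chain-plus-dedup shape (illustrative types, not the RepoStore API):

use std::collections::HashSet;

fn put_once(content_blocks: &[u32], header_blocks: &[u32]) -> Vec<u32> {
    let mut seen: HashSet<u32> = HashSet::new();
    content_blocks
        .iter()
        .chain(header_blocks.iter())
        .filter(|id| seen.insert(**id)) // insert() returns false on duplicates
        .cloned()
        .collect()
}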
    /// Get the ID of the Object
    pub fn id(&self) -> ObjectId {
-        self.blocks.last().unwrap().id()
+        self.root_block().id()
    }

+    /// Get the ID of the Object and save it
+    pub fn get_and_save_id(&mut self) -> ObjectId {
+        self.blocks.last_mut().unwrap().get_and_save_id()
+    }

    /// Get the key for the Object
    pub fn key(&self) -> Option<SymKey> {
-        self.blocks.last().unwrap().key()
+        self.root_block().key()
    }
    /// Get an `ObjectRef` for the root object
@@ -346,6 +431,8 @@ impl Object {
        self.header.as_ref().map_or(true, |h| h.is_root())
    }

+    /// Get deps (that have an ID in the header, without checking if there is a key for them in the header_keys);
+    /// if there is no header, returns an empty vec
    pub fn deps(&self) -> Vec<ObjectId> {
        match &self.header {
            Some(h) => h.deps(),
@@ -353,6 +440,8 @@ impl Object {
        }
    }

+    /// Get acks (that have an ID in the header, without checking if there is a key for them in the header_keys);
+    /// if there is no header, returns an empty vec
    pub fn acks(&self) -> Vec<ObjectId> {
        match &self.header {
            Some(h) => h.acks(),
@@ -409,7 +498,7 @@ impl Object {
        match block {
            Block::V0(b) => {
-                // decrypt content
+                // decrypt content in place (this is why we have to clone first)
                let mut content_dec = b.content.encrypted_content().clone();
                match key {
                    SymKey::ChaCha20Key(key) => {
@@ -450,6 +539,8 @@ impl Object {
            }
            ChunkContentV0::DataChunk(chunk) => {
                if leaves.is_some() {
+                    //FIXME: this part is never used (when is leaves.is_some() ?)
+                    //FIXME: if it were used, we should probably try to remove the block.clone()
                    let mut leaf = block.clone();
                    leaf.set_key(Some(key.clone()));
                    let l = &mut **leaves.as_mut().unwrap();
@@ -482,21 +573,21 @@ impl Object {
        Ok(())
    }
-    /// Parse the Object and return the leaf Blocks with decryption key set
-    pub fn leaves(&self) -> Result<Vec<Block>, ObjectParseError> {
-        let mut leaves: Vec<Block> = vec![];
-        let parents = vec![(self.id(), self.key().unwrap())];
-        match Self::collect_leaves(
-            &self.blocks,
-            &parents,
-            self.blocks.len() - 1,
-            &mut Some(&mut leaves),
-            &mut None,
-        ) {
-            Ok(_) => Ok(leaves),
-            Err(e) => Err(e),
-        }
-    }
+    // /// Parse the Object and return the leaf Blocks with decryption key set
+    // pub fn leaves(&self) -> Result<Vec<Block>, ObjectParseError> {
+    //     let mut leaves: Vec<Block> = vec![];
+    //     let parents = vec![(self.id(), self.key().unwrap())];
+    //     match Self::collect_leaves(
+    //         &self.blocks,
+    //         &parents,
+    //         self.blocks.len() - 1,
+    //         &mut Some(&mut leaves),
+    //         &mut None,
+    //     ) {
+    //         Ok(_) => Ok(leaves),
+    //         Err(e) => Err(e),
+    //     }
+    // }
    /// Parse the Object and return the decrypted content assembled from Blocks
    pub fn content(&self) -> Result<ObjectContent, ObjectParseError> {
@@ -512,16 +603,13 @@ impl Object {
            &mut None,
            &mut Some(&mut obj_content),
        ) {
-            Ok(_) => {
-                let content: ObjectContentV0;
-                match serde_bare::from_slice(obj_content.as_slice()) {
-                    Ok(c) => Ok(ObjectContent::V0(c)),
-                    Err(e) => {
-                        log_debug!("Object deserialize error: {}", e);
-                        Err(ObjectParseError::ObjectDeserializeError)
-                    }
-                }
-            }
+            Ok(_) => match serde_bare::from_slice(obj_content.as_slice()) {
+                Ok(c) => Ok(c),
+                Err(e) => {
+                    log_debug!("Object deserialize error: {}", e);
+                    Err(ObjectParseError::ObjectDeserializeError)
+                }
+            },
            Err(e) => Err(e),
        }
    }
@@ -572,20 +660,21 @@ mod test {
            .read_to_end(&mut img_buffer)
            .expect("read of test.jpg");

-        let file = FileV0 {
+        let file = File::V0(FileV0 {
            content_type: "image/jpeg".into(),
            metadata: vec![],
            content: img_buffer,
-        };
-        let content = ObjectContentV0::File(file);
+        });
+        let content = ObjectContent::V0(ObjectContentV0::File(file));

        let deps: Vec<ObjectId> = vec![Digest::Blake3Digest32([9; 32])];
        let max_object_size = store_max_value_size();

-        let repo_secret = SymKey::ChaCha20Key([0; 32]);
-        let repo_pubkey = PubKey::Ed25519PubKey([1; 32]);
+        let store_secret = SymKey::ChaCha20Key([0; 32]);
+        let store_pubkey = PubKey::Ed25519PubKey([1; 32]);
+        let store_repo = StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey));

-        let obj = Object::new(content, None, max_object_size, repo_pubkey, repo_secret);
+        let obj = Object::new(content, None, max_object_size, &store_repo, &store_secret);

        log_debug!("obj.id: {:?}", obj.id());
        log_debug!("obj.key: {:?}", obj.key());
@@ -607,27 +696,28 @@ mod test {
    /// Test tree API
    #[test]
    pub fn test_object() {
-        let file = FileV0 {
+        let file = File::V0(FileV0 {
            content_type: "file/test".into(),
            metadata: Vec::from("some meta data here"),
            content: [(0..255).collect::<Vec<u8>>().as_slice(); 320].concat(),
-        };
-        let content = ObjectContentV0::File(file);
+        });
+        let content = ObjectContent::V0(ObjectContentV0::File(file));

        let deps = vec![Digest::Blake3Digest32([9; 32])];
-        let header = CommitHeaderV0::new_with_deps(deps.clone());
+        let header = CommitHeader::new_with_deps(deps.clone());
        let exp = Some(2u32.pow(31));
        let max_object_size = 0;

-        let repo_secret = SymKey::ChaCha20Key([0; 32]);
-        let repo_pubkey = PubKey::Ed25519PubKey([1; 32]);
+        let store_secret = SymKey::ChaCha20Key([0; 32]);
+        let store_pubkey = PubKey::Ed25519PubKey([1; 32]);
+        let store_repo = StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey));

-        let obj = Object::new(
+        let mut obj = Object::new(
            content.clone(),
            header,
            max_object_size,
-            repo_pubkey,
-            repo_secret.clone(),
+            &store_repo,
+            &store_secret,
        );

        log_debug!("obj.id: {:?}", obj.id());
@@ -645,14 +735,14 @@ mod test {
        assert_eq!(*obj.deps(), deps);

        match obj.content() {
-            Ok(ObjectContent::V0(cnt)) => {
+            Ok(cnt) => {
                assert_eq!(content, cnt);
            }
            Err(e) => panic!("Object parse error: {:?}", e),
        }

        let store = Box::new(HashMapRepoStore::new());

-        obj.save(&store).expect("Object save error");
+        obj.save_in_test(&store).expect("Object save error");

        let obj2 = Object::load(obj.id(), obj.key(), &store).unwrap();
@@ -669,7 +759,7 @@ mod test {
        assert_eq!(*obj2.deps(), deps);

-        match obj2.content_v0() {
+        match obj2.content() {
            Ok(cnt) => {
                assert_eq!(content, cnt);
            }
@@ -702,11 +792,11 @@ mod test {
    pub fn test_depth_1() {
        let deps: Vec<ObjectId> = vec![Digest::Blake3Digest32([9; 32])];

-        let empty_file = ObjectContentV0::File(FileV0 {
+        let empty_file = ObjectContent::V0(ObjectContentV0::File(File::V0(FileV0 {
            content_type: "".into(),
            metadata: vec![],
            content: vec![],
-        });
+        })));
        let empty_file_ser = serde_bare::to_vec(&empty_file).unwrap();
        log_debug!("empty file size: {}", empty_file_ser.len());
@@ -718,26 +808,27 @@ mod test {
            - DATA_VARINT_EXTRA;
        log_debug!("file size: {}", size);

-        let content = ObjectContentV0::File(FileV0 {
+        let content = ObjectContent::V0(ObjectContentV0::File(File::V0(FileV0 {
            content_type: "".into(),
            metadata: vec![],
            content: vec![99; size],
-        });
+        })));
        let content_ser = serde_bare::to_vec(&content).unwrap();
        log_debug!("content len: {}", content_ser.len());

        let expiry = Some(2u32.pow(31));
        let max_object_size = store_max_value_size();

-        let repo_secret = SymKey::ChaCha20Key([0; 32]);
-        let repo_pubkey = PubKey::Ed25519PubKey([1; 32]);
+        let store_secret = SymKey::ChaCha20Key([0; 32]);
+        let store_pubkey = PubKey::Ed25519PubKey([1; 32]);
+        let store_repo = StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey));

        let object = Object::new(
            content,
-            CommitHeaderV0::new_with_deps(deps),
+            CommitHeader::new_with_deps(deps),
            max_object_size,
-            repo_pubkey,
-            repo_secret,
+            &store_repo,
+            &store_secret,
        );

        log_debug!("root_id: {:?}", object.id());
@@ -779,7 +870,7 @@ mod test {
         let root_depsref = Block::new(
             vec![],
-            Some(ObjectRef::from_id_key(id, key.clone())),
+            Some(CommitHeaderRef::from_id_key(id, key.clone())),
             data_ser.clone(),
             None,
         );
@@ -797,7 +888,7 @@ mod test {
         let root_one = Block::new(
             vec![id; 1],
-            Some(ObjectRef::from_id_key(id, key.clone())),
+            Some(CommitHeaderRef::from_id_key(id, key.clone())),
             one_key_ser.clone(),
             None,
         );
@@ -805,7 +896,7 @@ mod test {
         let root_two = Block::new(
             vec![id; 2],
-            Some(ObjectRef::from_id_key(id, key)),
+            Some(CommitHeaderRef::from_id_key(id, key)),
             two_keys_ser.clone(),
             None,
         );
p2p-repo/src/repo.rs
@@ -35,20 +35,29 @@ impl Repository {
 pub struct UserInfo {
     /// list of permissions granted to user, with optional metadata
-    pub permissions: HashMap<Permission, Vec<u8>>,
+    pub permissions: HashMap<PermissionV0, Vec<u8>>,
+    pub id: UserId,
 }

 impl UserInfo {
-    pub fn has_any_perm(&self, perms: &HashSet<&Permission>) -> Result<(), NgError> {
-        let has_perms: HashSet<&Permission> = self.permissions.keys().collect();
-        if has_perms.intersection(perms).count() > 0 {
-            Ok(())
-        } else {
-            Err(NgError::PermissionDenied)
-        }
+    pub fn has_any_perm(&self, perms: &HashSet<PermissionV0>) -> Result<(), NgError> {
+        if self.has_perm(&PermissionV0::Owner).is_ok() {
+            return Ok(());
+        }
+        let is_admin = self.has_perm(&PermissionV0::Admin).is_ok();
+        let has_perms: HashSet<&PermissionV0> = self.permissions.keys().collect();
+        for perm in perms {
+            if is_admin && perm.is_delegated_by_admin() || has_perms.contains(perm) {
+                return Ok(());
+            }
+        }
+        Err(NgError::PermissionDenied)
     }

-    pub fn has_perm(&self, perm: &Permission) -> Result<&Vec<u8>, NgError> {
+    pub fn has_perm(&self, perm: &PermissionV0) -> Result<&Vec<u8>, NgError> {
         self.permissions.get(perm).ok_or(NgError::PermissionDenied)
     }
 }
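A self-contained sketch of the new check's semantics, with stand-in types (the `AddBranch` variant and its `is_delegated_by_admin` rule are assumptions for illustration; the real types live in p2p-repo):

use std::collections::{HashMap, HashSet};

// Stand-in types so the sketch compiles on its own.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum PermissionV0 {
    Owner,
    Admin,
    AddBranch, // assumed variant, for illustration
}

impl PermissionV0 {
    // Assumed rule: an admin may exercise branch management without an explicit grant.
    fn is_delegated_by_admin(&self) -> bool {
        matches!(self, PermissionV0::AddBranch)
    }
}

struct UserInfo {
    permissions: HashMap<PermissionV0, Vec<u8>>,
}

impl UserInfo {
    fn has_perm(&self, perm: &PermissionV0) -> Result<&Vec<u8>, ()> {
        self.permissions.get(perm).ok_or(())
    }

    // Same shape as the refactored check above: Owner short-circuits,
    // Admin passes for admin-delegated permissions, everyone else needs
    // an exact grant in the permissions map.
    fn has_any_perm(&self, perms: &HashSet<PermissionV0>) -> Result<(), ()> {
        if self.has_perm(&PermissionV0::Owner).is_ok() {
            return Ok(());
        }
        let is_admin = self.has_perm(&PermissionV0::Admin).is_ok();
        let has_perms: HashSet<&PermissionV0> = self.permissions.keys().collect();
        for perm in perms {
            if is_admin && perm.is_delegated_by_admin() || has_perms.contains(perm) {
                return Ok(());
            }
        }
        Err(())
    }
}

fn main() {
    let admin = UserInfo {
        permissions: HashMap::from([(PermissionV0::Admin, vec![])]),
    };
    // Admin holds no AddBranch grant, but AddBranch is admin-delegated, so this passes.
    assert!(admin.has_any_perm(&HashSet::from([PermissionV0::AddBranch])).is_ok());
}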
@@ -58,7 +67,7 @@ pub struct Repo<'a> {
     /// Repo definition
     pub repo_def: Repository,

-    pub members: HashMap<UserId, UserInfo>,
+    pub members: HashMap<Digest, UserInfo>,

     store: Box<dyn RepoStore + Send + Sync + 'a>,
 }
@@ -66,8 +75,8 @@ pub struct Repo<'a> {
 impl<'a> Repo<'a> {
     pub fn new_with_member(
         id: &PubKey,
-        member: UserId,
-        perms: &[Permission],
+        member: &UserId,
+        perms: &[PermissionV0],
         store: Box<dyn RepoStore + Send + Sync + 'a>,
     ) -> Self {
         let mut members = HashMap::new();
@@ -75,11 +84,17 @@ impl<'a> Repo<'a> {
             perms
                 .iter()
                 .map(|p| (*p, vec![]))
-                .collect::<Vec<(Permission, Vec<u8>)>>()
+                .collect::<Vec<(PermissionV0, Vec<u8>)>>()
                 .iter()
                 .cloned(),
         );
-        members.insert(member, UserInfo { permissions });
+        members.insert(
+            member.into(),
+            UserInfo {
+                id: *member,
+                permissions,
+            },
+        );
         Self {
             repo_def: Repository::new(id, &vec![]),
             members,
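A possible call site for the new signature, reusing the dummy key style of the tests above (`PermissionV0::Create` is an assumed variant; sketch only):

    // Dummy key material, as in the tests; illustration only.
    let repo_id = PubKey::Ed25519PubKey([1; 32]);
    let member: UserId = PubKey::Ed25519PubKey([2; 32]);

    // The member now goes in by reference; internally the members map is
    // keyed by a Digest derived from the member's key (the member.into() above).
    let repo = Repo::new_with_member(
        &repo_id,
        &member,
        &[PermissionV0::Create], // assumed variant, for illustration
        Box::new(HashMapRepoStore::new()),
    );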
@@ -88,9 +103,9 @@ impl<'a> Repo<'a> {
     }

     pub fn verify_permission(&self, commit: &Commit) -> Result<(), NgError> {
-        let content = commit.content_v0();
+        let content_author = commit.content_v0().author;
         let body = commit.load_body(&self.store)?;
-        match self.members.get(&content.author) {
+        match self.members.get(&content_author) {
             Some(info) => return info.has_any_perm(&body.required_permission()),
             None => {}
         }
p2p-repo/src/site.rs
@@ -33,22 +33,30 @@ impl SiteV0 {
         let private_key = PrivKey::random_ed();

+        /* pub key: PrivKey,
+        // signature with site_key
+        // pub sig: Sig,
+        /// current read capability
+        pub read_cap: ReadCap,
+        pub write_cap: RepoWriteCapSecret, */
+
         let public = SiteStore {
             key: PrivKey::dummy(),
-            root_branch_def_ref: BlockRef::dummy(),
-            repo_secret: SymKey::random(),
+            read_cap: BlockRef::dummy(),
+            write_cap: SymKey::random(),
         };

         let protected = SiteStore {
             key: PrivKey::dummy(),
-            root_branch_def_ref: BlockRef::dummy(),
-            repo_secret: SymKey::random(),
+            read_cap: BlockRef::dummy(),
+            write_cap: SymKey::random(),
         };

         let private = SiteStore {
             key: PrivKey::dummy(),
-            root_branch_def_ref: BlockRef::dummy(),
-            repo_secret: SymKey::random(),
+            read_cap: BlockRef::dummy(),
+            write_cap: SymKey::random(),
         };

         Ok(Self {
p2p-repo/src/store.rs
@@ -42,6 +42,7 @@ pub enum StorageError {
     BackendError,
     SerializationError,
     AlreadyExists,
+    DataCorruption,
 }

 impl core::fmt::Display for StorageError {
@@ -112,7 +113,15 @@ impl HashMapRepoStore {
 impl RepoStore for HashMapRepoStore {
     fn get(&self, id: &BlockId) -> Result<Block, StorageError> {
         match self.blocks.read().unwrap().get(id) {
-            Some(block) => Ok(block.clone()),
+            Some(block) => {
+                let mut b = block.clone();
+                let i = b.get_and_save_id();
+                if *id == i {
+                    Ok(b)
+                } else {
+                    Err(StorageError::DataCorruption)
+                }
+            }
             None => Err(StorageError::NotFound),
         }
     }
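A sketch of what the check buys a caller, given some `block: Block` and assuming `RepoStore::put` returns the computed `BlockId` (as the LMDB/RocksDB stores below suggest):

    let store = HashMapRepoStore::new();
    let id = store.put(&block)?; // stored under its computed id

    // get() re-hashes the stored block: a mismatch now surfaces as
    // StorageError::DataCorruption instead of a silently wrong block.
    match store.get(&id) {
        Ok(mut b) => assert_eq!(b.get_and_save_id(), id),
        Err(StorageError::DataCorruption) => log_debug!("bytes for {:?} no longer hash to it", id),
        Err(e) => log_debug!("lookup failed: {:?}", e),
    }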
p2p-repo/src/types.rs
File diff suppressed because it is too large.
p2p-repo/src/utils.rs
@@ -141,7 +141,7 @@ pub fn verify(content: &Vec<u8>, sig: Sig, pub_key: PubKey) -> Result<(), NgError> {
     let sig_bytes = match sig {
         Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(),
     };
-    let sig = Signature::from_bytes(&sig_bytes)?;
+    let sig = ed25519_dalek::Signature::from_bytes(&sig_bytes)?;

     Ok(pk.verify_strict(content, &sig)?)
 }
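The fully qualified path disambiguates from any other `Signature` in scope. For context, a stand-alone round trip through the same dalek 1.x-style API (the `Keypair`/`Signer` usage here is an assumption inferred from the fallible `from_bytes` above):

    use ed25519_dalek::{Keypair, Signer};
    use rand::rngs::OsRng;

    fn sign_verify_roundtrip() -> Result<(), ed25519_dalek::SignatureError> {
        let keypair = Keypair::generate(&mut OsRng);
        let content = b"commit bytes";
        let sig = keypair.sign(content);

        // Serialize and re-parse, as verify() does with the two 32-byte
        // halves of Sig::Ed25519Sig, then check strictly.
        let sig2 = ed25519_dalek::Signature::from_bytes(&sig.to_bytes())?;
        keypair.public.verify_strict(content, &sig2)
    }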
stores-lmdb/src/repo_store.rs
@@ -97,8 +97,8 @@ impl RepoStore for LmdbRepoStore {
         match serde_bare::from_slice::<Block>(&block_ser.to_bytes().unwrap()) {
             Err(_e) => Err(StorageError::InvalidValue),
-            Ok(o) => {
-                if o.id() != *block_id {
+            Ok(mut o) => {
+                if o.get_and_save_id() != *block_id {
                     log_debug!(
                         "Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}",
                         block_id,
stores-rocksdb/src/repo_store.rs
@@ -88,8 +88,8 @@ impl RepoStore for LmdbRepoStore {
         match serde_bare::from_slice::<Block>(&block_ser.to_bytes().unwrap()) {
             Err(_e) => Err(StorageError::InvalidValue),
-            Ok(o) => {
-                if o.id() != *block_id {
+            Ok(mut o) => {
+                if o.get_and_save_id() != *block_id {
                     log_debug!(
                         "Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}",
                         block_id,