From f349d4a7485f611e7ede09a096fc721f817a613f Mon Sep 17 00:00:00 2001 From: Niko PLP Date: Sun, 14 Apr 2024 22:01:58 +0300 Subject: [PATCH] refactor UserStorage and BlockStorage --- Cargo.lock | 7 + nextgraph/Cargo.toml | 4 + nextgraph/src/local_broker.rs | 165 +++- ng-broker/src/broker_storage/account.rs | 36 +- ng-broker/src/broker_storage/config.rs | 6 +- ng-broker/src/broker_storage/invitation.rs | 26 +- ng-broker/src/broker_storage/overlay.rs | 35 +- ng-broker/src/broker_storage/peer.rs | 23 +- ng-broker/src/broker_storage/topic.rs | 24 +- ng-broker/src/broker_storage/wallet.rs | 6 +- ng-net/src/broker.rs | 56 +- ng-net/src/connection.rs | 9 +- ng-repo/src/block_storage.rs | 18 +- ng-repo/src/branch.rs | 128 +-- ng-repo/src/commit.rs | 205 ++-- ng-repo/src/errors.rs | 1 + ng-repo/src/event.rs | 23 +- ng-repo/src/file.rs | 287 ++---- ng-repo/src/kcv_storage.rs | 107 +- ng-repo/src/lib.rs | 2 + ng-repo/src/object.rs | 113 +-- ng-repo/src/repo.rs | 457 +-------- ng-repo/src/store.rs | 474 +++++++++ ng-repo/src/types.rs | 48 +- ng-storage-rocksdb/src/block_storage.rs | 1020 ++------------------ ng-storage-rocksdb/src/kcv_storage.rs | 464 ++++++--- ng-verifier/Cargo.toml | 7 +- ng-verifier/src/lib.rs | 2 + ng-verifier/src/types.rs | 115 ++- ng-verifier/src/verifier.rs | 309 ++++++ 30 files changed, 1930 insertions(+), 2247 deletions(-) create mode 100644 ng-repo/src/store.rs create mode 100644 ng-verifier/src/verifier.rs diff --git a/Cargo.lock b/Cargo.lock index e41c72f..6a5b6ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3223,10 +3223,12 @@ version = "0.1.0" dependencies = [ "async-once-cell", "async-std", + "async-trait", "base64-url", "ng-client-ws", "ng-net", "ng-repo", + "ng-storage-rocksdb", "ng-verifier", "ng-wallet", "once_cell", @@ -3413,16 +3415,21 @@ dependencies = [ name = "ng-verifier" version = "0.1.0" dependencies = [ + "async-std", "automerge", "blake3", "chacha20", + "getrandom 0.2.10", "ng-net", "ng-repo", "ng-storage-rocksdb", "oxigraph", + "rand 0.7.3", "serde", "serde_bare", "serde_bytes", + "threshold_crypto", + "web-time", "yrs", ] diff --git a/nextgraph/Cargo.toml b/nextgraph/Cargo.toml index 238888a..79d105c 100644 --- a/nextgraph/Cargo.toml +++ b/nextgraph/Cargo.toml @@ -31,6 +31,10 @@ web-time = "0.2.0" async-std = { version = "1.12.0", features = [ "attributes", "unstable" ] } zeroize = { version = "1.6.0", features = ["zeroize_derive"] } serde_json = "1.0" +async-trait = "0.1.64" + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +ng-storage-rocksdb = { path = "../ng-storage-rocksdb", version = "0.1.0" } [[example]] name = "in_memory" diff --git a/nextgraph/src/local_broker.rs b/nextgraph/src/local_broker.rs index b6cd9e2..beccc28 100644 --- a/nextgraph/src/local_broker.rs +++ b/nextgraph/src/local_broker.rs @@ -8,13 +8,17 @@ // according to those terms. 
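// Hedged sketch, not the crate's API: the Cargo.toml hunk above makes
// ng-storage-rocksdb a dependency only on non-wasm targets, so the persistent
// block-storage backend has to be chosen at compile time with cfg gates, while
// wasm builds keep blocks in memory. The names below (InMemoryBlocks,
// open_block_storage) are illustrative placeholders, not NextGraph types.
use std::collections::HashMap;
use std::path::Path;

#[derive(Default)]
struct InMemoryBlocks(HashMap<[u8; 32], Vec<u8>>);

#[cfg(not(target_arch = "wasm32"))]
fn open_block_storage(path: &Path, _encryption_key: [u8; 32]) -> InMemoryBlocks {
    // native targets: the patch opens an encrypted RocksDB store rooted at `path`;
    // this stand-in only marks where that selection happens
    let _ = path;
    InMemoryBlocks::default()
}

#[cfg(target_arch = "wasm32")]
fn open_block_storage(_path: &Path, _encryption_key: [u8; 32]) -> InMemoryBlocks {
    // wasm: no RocksDB available, blocks stay in memory
    InMemoryBlocks::default()
}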
use async_once_cell::OnceCell; -use async_std::sync::{Arc, RwLock}; +use async_std::sync::{Arc, Mutex, RwLock}; use core::fmt; -use ng_net::connection::{ClientConfig, IConnect, StartConfig}; -use ng_net::types::{ClientInfo, ClientType}; +use ng_net::actor::EActor; +use ng_net::connection::{ClientConfig, IConnect, NoiseFSM, StartConfig}; +use ng_net::errors::ProtocolError; +use ng_net::types::{ClientInfo, ClientType, ProtocolMessage}; use ng_net::utils::{Receiver, Sender}; +use ng_repo::block_storage::HashMapBlockStorage; use ng_repo::os_info::get_os_info; use ng_verifier::types::*; +use ng_verifier::verifier::Verifier; use ng_wallet::emojis::encode_pazzle; use once_cell::sync::Lazy; use serde_bare::to_vec; @@ -25,6 +29,7 @@ use std::path::PathBuf; use zeroize::{Zeroize, ZeroizeOnDrop}; use ng_net::broker::*; +use ng_repo::block_storage::BlockStorage; use ng_repo::errors::NgError; use ng_repo::log::*; use ng_repo::types::*; @@ -35,6 +40,8 @@ use ng_wallet::{create_wallet_v0, types::*}; use ng_client_ws::remote_ws::ConnectionWebSocket; #[cfg(target_arch = "wasm32")] use ng_client_ws::remote_ws_wasm::ConnectionWebSocket; +#[cfg(not(target_arch = "wasm32"))] +use ng_storage_rocksdb::block_storage::RocksDbBlockStorage; type JsStorageReadFn = dyn Fn(String) -> Result + 'static + Sync + Send; type JsStorageWriteFn = dyn Fn(String, String) -> Result<(), NgError> + 'static + Sync + Send; @@ -173,6 +180,16 @@ impl LocalBrokerConfig { _ => None, } } + fn compute_path(&self, dir: &String) -> Result { + match self { + Self::BasePath(path) => { + let mut new_path = path.clone(); + new_path.push(dir); + Ok(new_path) + } + _ => Err(NgError::InvalidArgument), + } + } } #[derive(Debug)] @@ -296,13 +313,24 @@ impl SessionConfig { // } // } +struct OpenedWallet { + wallet: SensitiveWallet, + block_storage: Arc>, +} + +impl fmt::Debug for OpenedWallet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "OpenedWallet.\nwallet {:?}", self.wallet) + } +} + #[derive(Debug)] struct LocalBroker { pub config: LocalBrokerConfig, pub wallets: HashMap, - pub opened_wallets: HashMap, + pub opened_wallets: HashMap, pub sessions: HashMap, @@ -311,7 +339,31 @@ struct LocalBroker { pub opened_sessions_list: Vec>, } -impl ILocalBroker for LocalBroker {} +// used to deliver events to the verifier on Clients, or Core that have Verifiers attached. 
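// The ILocalBroker impl that follows lets ng-net's BROKER singleton call back into
// this LocalBroker through a trait object, registered with set_local_broker() in
// init_() further down. Hedged sketch of that pattern with simplified, illustrative
// names (LocalDelivery and BrokerSketch are not real NextGraph types):
use async_std::sync::{Arc, RwLock};

#[async_trait::async_trait]
trait LocalDelivery: Send + Sync {
    async fn deliver(&mut self, event_payload: Vec<u8>);
}

struct BrokerSketch {
    delivered: usize,
}

#[async_trait::async_trait]
impl LocalDelivery for BrokerSketch {
    async fn deliver(&mut self, _event_payload: Vec<u8>) {
        // a real implementation would route the event to the right opened session
        self.delivered += 1;
    }
}

fn register() -> Arc<RwLock<dyn LocalDelivery>> {
    let broker = Arc::new(RwLock::new(BrokerSketch { delivered: 0 }));
    // same upcast the patch performs: Arc<RwLock<Concrete>> -> Arc<RwLock<dyn Trait>>
    Arc::clone(&broker) as Arc<RwLock<dyn LocalDelivery>>
}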
+#[async_trait::async_trait] +impl ILocalBroker for LocalBroker { + async fn deliver(&mut self, event: Event) {} +} + +// this is used if an Actor does a BROKER.local_broker.respond +// it happens when a remote peer is doing a request on the verifier +#[async_trait::async_trait] +impl EActor for LocalBroker { + async fn respond( + &mut self, + msg: ProtocolMessage, + fsm: Arc>, + ) -> Result<(), ProtocolError> { + // search opened_sessions by user_id of fsm + let session = match fsm.lock().await.user_id() { + Some(user) => self + .get_mut_session_for_user(&user) + .ok_or(ProtocolError::ActorError)?, + None => return Err(ProtocolError::ActorError), + }; + session.verifier.respond(msg, fsm).await + } +} impl LocalBroker { fn storage_path_for_user(&self, user_id: &UserId) -> Option { @@ -325,6 +377,13 @@ impl LocalBroker { } } + fn get_mut_session_for_user(&mut self, user: &UserId) -> Option<&mut Session> { + match self.opened_sessions.get(user) { + Some(idx) => self.opened_sessions_list[*idx as usize].as_mut(), + None => None, + } + } + fn verifier_config_type_from_session_config( &self, config: &SessionConfig, @@ -356,12 +415,13 @@ impl LocalBroker { let session = self.opened_sessions_list[*session_idx as usize] .as_mut() .ok_or(NgError::SessionNotFound)?; - let wallet = match &session.config { + let wallet = &match &session.config { SessionConfig::V0(v0) => self .opened_wallets .get(&v0.wallet_name) .ok_or(NgError::WalletNotFound), - }?; + }? + .wallet; Ok((wallet, session)) } @@ -426,7 +486,13 @@ async fn init_(config: LocalBrokerConfig) -> Result>, Ng }; //log_debug!("{:?}", &local_broker); - Ok(Arc::new(RwLock::new(local_broker))) + let broker = Arc::new(RwLock::new(local_broker)); + + BROKER.write().await.set_local_broker(Arc::clone( + &(Arc::clone(&broker) as Arc>), + )); + + Ok(broker) } #[doc(hidden)] @@ -671,20 +737,54 @@ pub async fn wallet_was_opened(mut wallet: SensitiveWallet) -> Result { if wallet.client().is_none() { // this case happens when the wallet is opened and not when it is imported (as the client is already there) wallet.set_client(lws.to_client_v0(wallet.privkey())?); } + lws } None => { return Err(NgError::WalletNotFound); } - } + }; + let block_storage = if lws.in_memory { + Arc::new(std::sync::RwLock::new(HashMapBlockStorage::new())) + as Arc> + } else { + #[cfg(not(target_family = "wasm"))] + { + let mut key_material = wallet + .client() + .as_ref() + .unwrap() + .sensitive_client_storage + .priv_key + .slice(); + let path = broker + .config + .compute_path(&wallet.client().as_ref().unwrap().id.to_hash_string())?; + let mut key: [u8; 32] = + derive_key("NextGraph Client BlockStorage BLAKE3 key", key_material); + Arc::new(std::sync::RwLock::new(RocksDbBlockStorage::open( + &path, key, + )?)) as Arc> + } + #[cfg(target_family = "wasm")] + { + panic!("no RocksDB in WASM"); + } + }; let client = wallet.client().as_ref().unwrap().clone(); - broker.opened_wallets.insert(wallet.id(), wallet); + let opened_wallet = OpenedWallet { + wallet, + block_storage, + }; + + broker.opened_wallets.insert(wallet_id, opened_wallet); Ok(client) } @@ -706,14 +806,16 @@ pub async fn session_start(mut config: SessionConfig) -> Result return Err(NgError::WalletNotFound), - Some(wallet) => { - let credentials = match wallet.individual_site(&user_id) { + Some(opened_wallet) => { + let block_storage = Arc::clone(&opened_wallet.block_storage); + let credentials = match opened_wallet.wallet.individual_site(&user_id) { Some(creds) => creds.clone(), None => return Err(NgError::NotFound), }; let 
client_storage_master_key = serde_bare::to_vec( - &wallet + &opened_wallet + .wallet .client() .as_ref() .unwrap() @@ -744,7 +846,7 @@ pub async fn session_start(mut config: SessionConfig) -> Result Result Result, ) -> Result, f64)>, NgError> { //FIXME: release this write lock much sooner than at the end of the loop of all tries to connect to some servers ? - // or maybe it is good to block as we dont want concurrent connection attemps potentially to the same server + // or maybe it is good to block as we dont want concurrent connection attempts potentially to the same server let mut local_broker = match LOCAL_BROKER.get() { None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized), Some(Ok(broker)) => broker.write().await, @@ -1084,8 +1189,8 @@ pub async fn wallet_close(wallet_name: &String) -> Result<(), NgError> { }; match broker.opened_wallets.remove(wallet_name) { - Some(mut wallet) => { - for user in wallet.sites() { + Some(mut opened_wallet) => { + for user in opened_wallet.wallet.sites() { let key: PubKey = (user.as_str()).try_into().unwrap(); match broker.opened_sessions.remove(&key) { Some(id) => { @@ -1094,7 +1199,7 @@ pub async fn wallet_close(wallet_name: &String) -> Result<(), NgError> { None => {} } } - wallet.zeroize(); + opened_wallet.wallet.zeroize(); } None => return Err(NgError::WalletNotFound), } @@ -1123,15 +1228,15 @@ pub async fn doc_fetch( nuri: String, payload: Option, ) -> Result<(Receiver, CancelFn), NgError> { - let broker = match LOCAL_BROKER.get() { + let mut broker = match LOCAL_BROKER.get() { None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized), - Some(Ok(broker)) => broker.read().await, + Some(Ok(broker)) => broker.write().await, }; if session_id as usize >= broker.opened_sessions_list.len() { return Err(NgError::InvalidArgument); } let session = broker.opened_sessions_list[session_id as usize] - .as_ref() + .as_mut() .ok_or(NgError::SessionNotFound)?; session.verifier.doc_fetch(nuri, payload) diff --git a/ng-broker/src/broker_storage/account.rs b/ng-broker/src/broker_storage/account.rs index 04e949b..5a959dd 100644 --- a/ng-broker/src/broker_storage/account.rs +++ b/ng-broker/src/broker_storage/account.rs @@ -60,7 +60,13 @@ impl<'a> Account<'a> { if acc.exists() { return Err(StorageError::AlreadyExists); } - store.put(Self::PREFIX_ACCOUNT, &to_vec(&id)?, None, to_vec(&admin)?)?; + store.put( + Self::PREFIX_ACCOUNT, + &to_vec(&id)?, + None, + &to_vec(&admin)?, + &None, + )?; Ok(acc) } @@ -71,7 +77,9 @@ impl<'a> Account<'a> { ) -> Result, StorageError> { let size = to_vec(&UserId::nil())?.len(); let mut res: Vec = vec![]; - for user in store.get_all_keys_and_values(Self::PREFIX_ACCOUNT, size, vec![], None)? { + for user in + store.get_all_keys_and_values(Self::PREFIX_ACCOUNT, size, vec![], None, &None)? 
+ { let admin: bool = from_slice(&user.1)?; if admin == admins { let id: UserId = from_slice(&user.0[1..user.0.len()])?; @@ -82,7 +90,12 @@ impl<'a> Account<'a> { } pub fn exists(&self) -> bool { self.store - .get(Self::PREFIX_ACCOUNT, &to_vec(&self.id).unwrap(), None) + .get( + Self::PREFIX_ACCOUNT, + &to_vec(&self.id).unwrap(), + None, + &None, + ) .is_ok() } pub fn id(&self) -> UserId { @@ -106,10 +119,10 @@ impl<'a> Account<'a> { let mut id_and_client = to_vec(&self.id)?; id_and_client.append(&mut client_key_ser); if tx - .has_property_value(Self::PREFIX_CLIENT, &id_and_client, None, &vec![]) + .has_property_value(Self::PREFIX_CLIENT, &id_and_client, None, &vec![], &None) .is_err() { - tx.put(Self::PREFIX_CLIENT, &id_and_client, None, &vec![])?; + tx.put(Self::PREFIX_CLIENT, &id_and_client, None, &vec![], &None)?; } if tx .has_property_value( @@ -117,6 +130,7 @@ impl<'a> Account<'a> { &id_and_client, Some(Self::INFO), &info_ser, + &None, ) .is_err() { @@ -125,6 +139,7 @@ impl<'a> Account<'a> { &id_and_client, Some(Self::INFO), &info_ser, + &None, )?; } let now = SystemTime::now() @@ -136,6 +151,7 @@ impl<'a> Account<'a> { &id_and_client, Some(Self::LAST_SEEN), &to_vec(&now)?, + &None, )?; Ok(()) }) @@ -187,6 +203,7 @@ impl<'a> Account<'a> { &to_vec(&self.id)?, None, &to_vec(&true)?, + &None, ) .is_ok() { @@ -206,17 +223,20 @@ impl<'a> Account<'a> { let mut client_key_ser = to_vec(&client_key)?; let size = client_key_ser.len() + id.len(); - if let Ok(clients) = tx.get_all_keys_and_values(Self::PREFIX_CLIENT, size, id, None) { + if let Ok(clients) = + tx.get_all_keys_and_values(Self::PREFIX_CLIENT, size, id, None, &None) + { for client in clients { - tx.del(Self::PREFIX_CLIENT, &client.0, None)?; + tx.del(Self::PREFIX_CLIENT, &client.0, None, &None)?; tx.del_all( Self::PREFIX_CLIENT_PROPERTY, &client.0, &Self::ALL_CLIENT_PROPERTIES, + &None, )?; } } - tx.del(Self::PREFIX_ACCOUNT, &to_vec(&self.id)?, None)?; + tx.del(Self::PREFIX_ACCOUNT, &to_vec(&self.id)?, None, &None)?; Ok(()) }) } diff --git a/ng-broker/src/broker_storage/config.rs b/ng-broker/src/broker_storage/config.rs index 6090de4..c8e40dd 100644 --- a/ng-broker/src/broker_storage/config.rs +++ b/ng-broker/src/broker_storage/config.rs @@ -75,7 +75,8 @@ impl<'a> Config<'a> { Self::PREFIX, &to_vec(&Self::KEY)?, Some(Self::MODE), - to_vec(&mode)?, + &to_vec(&mode)?, + &None, )?; Ok(acc) } @@ -85,13 +86,14 @@ impl<'a> Config<'a> { Self::PREFIX, &to_vec(&Self::KEY).unwrap(), Some(Self::SUFFIX_FOR_EXIST_CHECK), + &None, ) .is_ok() } pub fn mode(&self) -> Result { match self .store - .get(Self::PREFIX, &to_vec(&Self::KEY)?, Some(Self::MODE)) + .get(Self::PREFIX, &to_vec(&Self::KEY)?, Some(Self::MODE), &None) { Ok(ver) => Ok(from_slice::(&ver)?), Err(e) => Err(e), diff --git a/ng-broker/src/broker_storage/invitation.rs b/ng-broker/src/broker_storage/invitation.rs index 401b687..002b15e 100644 --- a/ng-broker/src/broker_storage/invitation.rs +++ b/ng-broker/src/broker_storage/invitation.rs @@ -75,7 +75,13 @@ impl<'a> Invitation<'a> { } let mut value = to_vec(&(code_type, expiry, memo.clone()))?; store.write_transaction(&mut |tx| { - tx.put(Self::PREFIX, &to_vec(code)?, Some(Self::TYPE), &value)?; + tx.put( + Self::PREFIX, + &to_vec(code)?, + Some(Self::TYPE), + &value, + &None, + )?; Ok(()) })?; Ok(acc) @@ -94,7 +100,7 @@ impl<'a> Invitation<'a> { unique = true; multi = true; } - for invite in store.get_all_keys_and_values(Self::PREFIX, size, vec![], None)? 
{ + for invite in store.get_all_keys_and_values(Self::PREFIX, size, vec![], None, &None)? { if invite.0.len() == size + 2 { let code: [u8; 32] = from_slice(&invite.0[1..invite.0.len() - 1])?; if invite.0[size + 1] == Self::TYPE { @@ -138,6 +144,7 @@ impl<'a> Invitation<'a> { Self::PREFIX, &to_vec(&self.id).unwrap(), Some(Self::SUFFIX_FOR_EXIST_CHECK), + &None, ) .is_ok() } @@ -148,7 +155,7 @@ impl<'a> Invitation<'a> { pub fn get_type(&self) -> Result { let type_ser = self .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE))?; + .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE), &None)?; let t: (u8, u32, Option) = from_slice(&type_ser)?; // if t.1 < now_timestamp() { // return Err(ProtocolError::Expired); @@ -157,9 +164,9 @@ impl<'a> Invitation<'a> { } pub fn is_expired(&self) -> Result { - let expire_ser = self - .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE))?; + let expire_ser = + self.store + .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE), &None)?; let expire: (u8, u32, Option) = from_slice(&expire_ser)?; if expire.1 < now_timestamp() { return Ok(true); @@ -169,7 +176,12 @@ impl<'a> Invitation<'a> { pub fn del(&self) -> Result<(), StorageError> { self.store.write_transaction(&mut |tx| { - tx.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)?; + tx.del_all( + Self::PREFIX, + &to_vec(&self.id)?, + &Self::ALL_PROPERTIES, + &None, + )?; Ok(()) }) } diff --git a/ng-broker/src/broker_storage/overlay.rs b/ng-broker/src/broker_storage/overlay.rs index a322220..7d6f483 100644 --- a/ng-broker/src/broker_storage/overlay.rs +++ b/ng-broker/src/broker_storage/overlay.rs @@ -79,6 +79,7 @@ impl<'a> Overlay<'a> { &to_vec(&id)?, Some(Self::SECRET), &to_vec(&secret)?, + &None, )?; if repo.is_some() { tx.put( @@ -86,6 +87,7 @@ impl<'a> Overlay<'a> { &to_vec(&id)?, Some(Self::REPO), &to_vec(&repo.unwrap())?, + &None, )?; } let meta = OverlayMeta { @@ -97,6 +99,7 @@ impl<'a> Overlay<'a> { &to_vec(&id)?, Some(Self::META), &to_vec(&meta)?, + &None, )?; Ok(()) })?; @@ -108,6 +111,7 @@ impl<'a> Overlay<'a> { Self::PREFIX, &to_vec(&self.id).unwrap(), Some(Self::SUFFIX_FOR_EXIST_CHECK), + &None, ) .is_ok() } @@ -122,7 +126,8 @@ impl<'a> Overlay<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::PEER), - to_vec(peer)?, + &to_vec(peer)?, + &None, ) } pub fn remove_peer(&self, peer: &PeerId) -> Result<(), StorageError> { @@ -130,7 +135,8 @@ impl<'a> Overlay<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::PEER), - to_vec(peer)?, + &to_vec(peer)?, + &None, ) } @@ -140,6 +146,7 @@ impl<'a> Overlay<'a> { &to_vec(&self.id)?, Some(Self::PEER), &to_vec(peer)?, + &None, ) } @@ -151,7 +158,8 @@ impl<'a> Overlay<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::TOPIC), - to_vec(topic)?, + &to_vec(topic)?, + &None, ) } pub fn remove_topic(&self, topic: &TopicId) -> Result<(), StorageError> { @@ -159,7 +167,8 @@ impl<'a> Overlay<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::TOPIC), - to_vec(topic)?, + &to_vec(topic)?, + &None, ) } @@ -169,13 +178,14 @@ impl<'a> Overlay<'a> { &to_vec(&self.id)?, Some(Self::TOPIC), &to_vec(topic)?, + &None, ) } pub fn secret(&self) -> Result { match self .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::SECRET)) + .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::SECRET), &None) { Ok(secret) => Ok(from_slice::(&secret)?), Err(e) => Err(e), @@ -185,7 +195,7 @@ impl<'a> Overlay<'a> { pub fn metadata(&self) -> Result { match self .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META)) + .get(Self::PREFIX, 
&to_vec(&self.id)?, Some(Self::META), &None) { Ok(meta) => Ok(from_slice::(&meta)?), Err(e) => Err(e), @@ -199,14 +209,15 @@ impl<'a> Overlay<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::META), - to_vec(meta)?, + &to_vec(meta)?, + &None, ) } pub fn repo(&self) -> Result { match self .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::REPO)) + .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::REPO), &None) { Ok(repo) => Ok(from_slice::(&repo)?), Err(e) => Err(e), @@ -214,7 +225,11 @@ impl<'a> Overlay<'a> { } pub fn del(&self) -> Result<(), StorageError> { - self.store - .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) + self.store.del_all( + Self::PREFIX, + &to_vec(&self.id)?, + &Self::ALL_PROPERTIES, + &None, + ) } } diff --git a/ng-broker/src/broker_storage/peer.rs b/ng-broker/src/broker_storage/peer.rs index 611aabe..435f788 100644 --- a/ng-broker/src/broker_storage/peer.rs +++ b/ng-broker/src/broker_storage/peer.rs @@ -77,12 +77,14 @@ impl<'a> Peer<'a> { &to_vec(&id)?, Some(Self::VERSION), &to_vec(&advert.version())?, + &None, )?; tx.put( Self::PREFIX, &to_vec(&id)?, Some(Self::ADVERT), &to_vec(&advert)?, + &None, )?; Ok(()) })?; @@ -94,6 +96,7 @@ impl<'a> Peer<'a> { Self::PREFIX, &to_vec(&self.id).unwrap(), Some(Self::SUFFIX_FOR_EXIST_CHECK), + &None, ) .is_ok() } @@ -103,7 +106,7 @@ impl<'a> Peer<'a> { pub fn version(&self) -> Result { match self .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::VERSION)) + .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::VERSION), &None) { Ok(ver) => Ok(from_slice::(&ver)?), Err(e) => Err(e), @@ -117,7 +120,8 @@ impl<'a> Peer<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::VERSION), - to_vec(&version)?, + &to_vec(&version)?, + &None, ) } pub fn update_advert(&self, advert: &PeerAdvert) -> Result<(), StorageError> { @@ -134,12 +138,14 @@ impl<'a> Peer<'a> { &to_vec(&self.id)?, Some(Self::VERSION), &to_vec(&advert.version())?, + &None, )?; tx.replace( Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT), &to_vec(&advert)?, + &None, )?; Ok(()) }) @@ -147,7 +153,7 @@ impl<'a> Peer<'a> { pub fn advert(&self) -> Result { match self .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT)) + .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT), &None) { Ok(advert) => Ok(from_slice::(&advert)?), Err(e) => Err(e), @@ -161,12 +167,17 @@ impl<'a> Peer<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT), - to_vec(advert)?, + &to_vec(advert)?, + &None, ) } pub fn del(&self) -> Result<(), StorageError> { - self.store - .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) + self.store.del_all( + Self::PREFIX, + &to_vec(&self.id)?, + &Self::ALL_PROPERTIES, + &None, + ) } } diff --git a/ng-broker/src/broker_storage/topic.rs b/ng-broker/src/broker_storage/topic.rs index ba61a51..0b63b38 100644 --- a/ng-broker/src/broker_storage/topic.rs +++ b/ng-broker/src/broker_storage/topic.rs @@ -63,7 +63,8 @@ impl<'a> Topic<'a> { Self::PREFIX, &to_vec(&id)?, Some(Self::META), - to_vec(&meta)?, + &to_vec(&meta)?, + &None, )?; Ok(acc) } @@ -73,6 +74,7 @@ impl<'a> Topic<'a> { Self::PREFIX, &to_vec(&self.id).unwrap(), Some(Self::SUFFIX_FOR_EXIST_CHECK), + &None, ) .is_ok() } @@ -87,7 +89,8 @@ impl<'a> Topic<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::HEAD), - to_vec(head)?, + &to_vec(head)?, + &None, ) } pub fn remove_head(&self, head: &ObjectId) -> Result<(), StorageError> { @@ -95,7 +98,8 @@ impl<'a> Topic<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::HEAD), - to_vec(head)?, + &to_vec(head)?, 
+ &None, ) } @@ -105,13 +109,14 @@ impl<'a> Topic<'a> { &to_vec(&self.id)?, Some(Self::HEAD), &to_vec(head)?, + &None, ) } pub fn metadata(&self) -> Result { match self .store - .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META)) + .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META), &None) { Ok(meta) => Ok(from_slice::(&meta)?), Err(e) => Err(e), @@ -125,12 +130,17 @@ impl<'a> Topic<'a> { Self::PREFIX, &to_vec(&self.id)?, Some(Self::META), - to_vec(meta)?, + &to_vec(meta)?, + &None, ) } pub fn del(&self) -> Result<(), StorageError> { - self.store - .del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES) + self.store.del_all( + Self::PREFIX, + &to_vec(&self.id)?, + &Self::ALL_PROPERTIES, + &None, + ) } } diff --git a/ng-broker/src/broker_storage/wallet.rs b/ng-broker/src/broker_storage/wallet.rs index e41082a..e57f40e 100644 --- a/ng-broker/src/broker_storage/wallet.rs +++ b/ng-broker/src/broker_storage/wallet.rs @@ -47,7 +47,7 @@ impl<'a> Wallet<'a> { ) -> Result { let mut result: Option = None; self.store.write_transaction(&mut |tx| { - let got = tx.get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK)); + let got = tx.get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK), &None); match got { Err(e) => { if e == StorageError::NotFound { @@ -86,12 +86,12 @@ impl<'a> Wallet<'a> { ) -> Result { let symkey = SymKey::random(); let vec = symkey.slice().to_vec(); - tx.put(prefix, key, Some(Self::SYM_KEY), &vec)?; + tx.put(prefix, key, Some(Self::SYM_KEY), &vec, &None)?; Ok(symkey) } pub fn exists_single_key(&self, prefix: u8, key: &Vec) -> bool { self.store - .get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK)) + .get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK), &None) .is_ok() } diff --git a/ng-net/src/broker.rs b/ng-net/src/broker.rs index e26229c..7840385 100644 --- a/ng-net/src/broker.rs +++ b/ng-net/src/broker.rs @@ -11,6 +11,7 @@ //! 
Broker singleton present in every instance of NextGraph (Client, Server, Core node) +use crate::actor::EActor; use crate::connection::*; use crate::errors::*; use crate::server_storage::ServerStorage; @@ -66,7 +67,17 @@ pub struct ServerConfig { pub bootstrap: BootstrapContent, } -pub trait ILocalBroker: Send + Sync {} +/*pub trait EActor: Send + Sync + std::fmt::Debug { + async fn respond( + &mut self, + msg: ProtocolMessage, + fsm: Arc>, + ) -> Result<(), ProtocolError>; +}*/ +#[async_trait::async_trait] +pub trait ILocalBroker: Send + Sync + EActor { + async fn deliver(&mut self, event: Event); +} pub static BROKER: Lazy>> = Lazy::new(|| Arc::new(RwLock::new(Broker::new()))); @@ -88,7 +99,8 @@ pub struct Broker<'a> { tauri_streams: HashMap>, disconnections_sender: Sender, disconnections_receiver: Option>, - local_broker: Option>, + //local_broker: Option>, + local_broker: Option>>, } impl<'a> Broker<'a> { @@ -148,9 +160,9 @@ impl<'a> Broker<'a> { self.server_storage = Some(Box::new(storage)); } - pub fn set_local_broker(&mut self, broker: impl ILocalBroker + 'a) { + pub fn set_local_broker(&mut self, broker: Arc>) { //log_debug!("set_local_broker"); - self.local_broker = Some(Box::new(broker)); + self.local_broker = Some(broker); } pub fn set_server_config(&mut self, config: ServerConfig) { @@ -183,12 +195,11 @@ impl<'a> Broker<'a> { .as_ref() .ok_or(ProtocolError::BrokerError) } - - pub fn get_local_broker_mut( - &mut self, - ) -> Result<&mut Box, NgError> { - //log_debug!("GET STORAGE {:?}", self.server_storage); - self.local_broker.as_mut().ok_or(NgError::BrokerError) + //Option>>, + pub fn get_local_broker(&self) -> Result>, NgError> { + Ok(Arc::clone( + self.local_broker.as_ref().ok_or(NgError::BrokerError)?, + )) } #[cfg(not(target_arch = "wasm32"))] @@ -340,18 +351,19 @@ impl<'a> Broker<'a> { nuri: String, obj_ref: ObjectRef, ) -> Result { - let blockstream = self - .get_block_from_store_with_block_id(nuri, obj_ref.id, true) - .await?; - let store = Box::new(HashMapBlockStorage::from_block_stream(blockstream).await); - - Object::load(obj_ref.id, Some(obj_ref.key), &store) - .map_err(|e| match e { - ObjectParseError::MissingBlocks(_missing) => ProtocolError::MissingBlocks, - _ => ProtocolError::ObjectParseError, - })? - .content() - .map_err(|_| ProtocolError::ObjectParseError) + unimplemented!(); + // let blockstream = self + // .get_block_from_store_with_block_id(nuri, obj_ref.id, true) + // .await?; + // let store = Box::new(HashMapBlockStorage::from_block_stream(blockstream).await); + + // Object::load(obj_ref.id, Some(obj_ref.key), &store) + // .map_err(|e| match e { + // ObjectParseError::MissingBlocks(_missing) => ProtocolError::MissingBlocks, + // _ => ProtocolError::ObjectParseError, + // })? 
+ // .content() + // .map_err(|_| ProtocolError::ObjectParseError) } pub async fn doc_sync_branch(&mut self, anuri: String) -> (Receiver, Sender) { diff --git a/ng-net/src/connection.rs b/ng-net/src/connection.rs index 80946dc..7fa5d0d 100644 --- a/ng-net/src/connection.rs +++ b/ng-net/src/connection.rs @@ -31,7 +31,7 @@ use async_std::sync::Mutex; use either::Either; use futures::{channel::mpsc, select, FutureExt, SinkExt}; use ng_repo::log::*; -use ng_repo::types::{DirectPeerId, PrivKey, PubKey, X25519PrivKey}; +use ng_repo::types::{DirectPeerId, PrivKey, PubKey, UserId, X25519PrivKey}; use ng_repo::utils::{sign, verify}; use noise_protocol::{patterns::noise_xk, CipherState, HandshakeState}; use noise_rust_crypto::*; @@ -255,6 +255,13 @@ impl NoiseFSM { } } + pub fn user_id(&self) -> Option { + match &self.config { + Some(start_config) => start_config.get_user(), + _ => None, + } + } + fn decrypt(&mut self, ciphertext: &Noise) -> Result { let ser = self .noise_cipher_state_dec diff --git a/ng-repo/src/block_storage.rs b/ng-repo/src/block_storage.rs index 51d7d63..bf46f6d 100644 --- a/ng-repo/src/block_storage.rs +++ b/ng-repo/src/block_storage.rs @@ -23,13 +23,13 @@ use std::{ pub trait BlockStorage: Send + Sync { /// Load a block from the storage. - fn get(&self, id: &BlockId) -> Result; + fn get(&self, overlay: &OverlayId, id: &BlockId) -> Result; /// Save a block to the storage. - fn put(&self, block: &Block) -> Result; + fn put(&self, overlay: &OverlayId, block: &Block) -> Result; /// Delete a block from the storage. - fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError>; + fn del(&self, overlay: &OverlayId, id: &BlockId) -> Result; /// number of Blocks in the storage fn len(&self) -> Result; @@ -91,10 +91,10 @@ impl HashMapBlockStorage { } } - pub async fn from_block_stream(mut blockstream: Receiver) -> Self { + pub async fn from_block_stream(overlay: &OverlayId, mut blockstream: Receiver) -> Self { let this = Self::new(); while let Some(block) = blockstream.next().await { - this.put(&block).unwrap(); + this.put(overlay, &block).unwrap(); } this } @@ -114,7 +114,7 @@ impl HashMapBlockStorage { } impl BlockStorage for HashMapBlockStorage { - fn get(&self, id: &BlockId) -> Result { + fn get(&self, overlay: &OverlayId, id: &BlockId) -> Result { match self.blocks.read().unwrap().get(id) { Some(block) => { let mut b = block.clone(); @@ -133,7 +133,7 @@ impl BlockStorage for HashMapBlockStorage { Ok(self.get_len()) } - fn put(&self, block: &Block) -> Result { + fn put(&self, overlay: &OverlayId, block: &Block) -> Result { let id = block.id(); //log_debug!("PUTTING {}", id); let mut b = block.clone(); @@ -142,7 +142,7 @@ impl BlockStorage for HashMapBlockStorage { Ok(id) } - fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError> { + fn del(&self, overlay: &OverlayId, id: &BlockId) -> Result { let block = self .blocks .write() @@ -150,6 +150,6 @@ impl BlockStorage for HashMapBlockStorage { .remove(id) .ok_or(StorageError::NotFound)?; let size = size_of_val(&block); - Ok((block, size)) + Ok(size) } } diff --git a/ng-repo/src/branch.rs b/ng-repo/src/branch.rs index 079f4d4..197d896 100644 --- a/ng-repo/src/branch.rs +++ b/ng-repo/src/branch.rs @@ -17,6 +17,7 @@ use zeroize::{Zeroize, ZeroizeOnDrop}; use crate::block_storage::*; use crate::errors::*; use crate::object::*; +use crate::store::Store; use crate::types::*; use crate::utils::encrypt_in_place; @@ -97,7 +98,7 @@ impl Branch { target_heads: &[ObjectId], known_heads: &[ObjectId], //their_filter: &BloomFilter, 
- store: &Box, + store: &Store, ) -> Result, ObjectParseError> { //log_debug!(">> sync_req"); //log_debug!(" target_heads: {:?}", target_heads); @@ -108,7 +109,7 @@ impl Branch { /// optionally collecting the missing objects/blocks that couldn't be found locally on the way fn load_causal_past( cobj: &Object, - store: &Box, + store: &Store, theirs: &HashSet, visited: &mut HashSet, missing: &mut Option<&mut HashSet>, @@ -179,26 +180,27 @@ mod test { //use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership}; - struct Test<'a> { - storage: Box, - } - - impl<'a> Test<'a> { - fn storage(s: impl BlockStorage + 'a) -> Self { - Test { - storage: Box::new(s), - } - } - fn s(&self) -> &Box { - &self.storage - } - } + // struct Test<'a> { + // storage: Box, + // } + + // impl<'a> Test<'a> { + // fn storage(s: impl BlockStorage + 'a) -> Self { + // Test { + // storage: Box::new(s), + // } + // } + // fn s(&self) -> &Box { + // &self.storage + // } + // } use crate::branch::*; use crate::repo::Repo; use crate::log::*; + use crate::store::Store; use crate::utils::*; #[test] @@ -206,18 +208,10 @@ mod test { fn add_obj( content: ObjectContentV0, header: Option, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - store: &Box, + store: &Store, ) -> ObjectRef { let max_object_size = 4000; - let mut obj = Object::new( - ObjectContent::V0(content), - header, - max_object_size, - store_pubkey, - store_secret, - ); + let mut obj = Object::new(ObjectContent::V0(content), header, max_object_size, store); log_debug!(">>> add_obj"); log_debug!(" id: {:?}", obj.id()); log_debug!(" header: {:?}", obj.header()); @@ -233,16 +227,14 @@ mod test { deps: Vec, acks: Vec, body_ref: ObjectRef, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - store: &Box, + store: &Store, ) -> ObjectRef { let header = CommitHeader::new_with_deps_and_acks( deps.iter().map(|r| r.id).collect(), acks.iter().map(|r| r.id).collect(), ); - let overlay = store_pubkey.overlay_id_for_read_purpose(); + let overlay = store.get_store_repo().overlay_id_for_read_purpose(); let obj_ref = ObjectRef { id: ObjectId::Blake3Digest32([1; 32]), @@ -268,57 +260,34 @@ mod test { ) .unwrap(); //log_debug!("commit: {:?}", commit); - add_obj( - ObjectContentV0::Commit(Commit::V0(commit)), - header, - store_pubkey, - store_secret, - store, - ) + add_obj(ObjectContentV0::Commit(Commit::V0(commit)), header, store) } - fn add_body_branch( - branch: BranchV0, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - store: &Box, - ) -> ObjectRef { + fn add_body_branch(branch: BranchV0, store: &Store) -> ObjectRef { let body: CommitBodyV0 = CommitBodyV0::Branch(Branch::V0(branch)); //log_debug!("body: {:?}", body); add_obj( ObjectContentV0::CommitBody(CommitBody::V0(body)), None, - store_pubkey, - store_secret, store, ) } - fn add_body_trans( - header: Option, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - store: &Box, - ) -> ObjectRef { + fn add_body_trans(header: Option, store: &Store) -> ObjectRef { let content = [7u8; 777].to_vec(); let body = CommitBodyV0::AsyncTransaction(Transaction::V0(content)); //log_debug!("body: {:?}", body); add_obj( ObjectContentV0::CommitBody(CommitBody::V0(body)), header, - store_pubkey, - store_secret, store, ) } - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - // repo let (repo_privkey, repo_pubkey) = generate_keypair(); - let (store_repo, repo_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_with_key(repo_pubkey); // branch 
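// Hedged sketch of the API shape this refactor converges on in the tests above: the
// former (store_repo, store_secret, storage) argument triple is folded into one
// borrowed Store handle. StoreSketch and its fields are simplified stand-ins, not
// ng-repo's real Store definition.
struct StoreSketch {
    overlay_id: [u8; 32],      // read overlay derived from the store repo pubkey
    read_cap_secret: [u8; 32], // previously threaded around as `store_secret`
    // the block storage backend also lives inside the Store (omitted here)
}

impl StoreSketch {
    fn overlay_id_for_read_purpose(&self) -> [u8; 32] {
        self.overlay_id
    }
}

// before: add_commit(..., body_ref, &store_repo, &repo_secret, repo.get_storage())
// after:  add_commit(..., body_ref, &repo.store)
fn add_commit_sketch(seq: u64, store: &StoreSketch) -> [u8; 32] {
    // the real helper builds a CommitV0, saves it through the store and returns its
    // ObjectRef; this sketch only shows that everything it needs comes from `store`
    let overlay = store.overlay_id_for_read_purpose();
    let _ = (seq, store.read_cap_secret);
    overlay
}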
@@ -332,8 +301,8 @@ mod test { &repo_pubkey, &member_pubkey, &[PermissionV0::WriteAsync], - store_repo.overlay_id_for_read_purpose(), - t.s(), + store.get_store_repo().overlay_id_for_read_purpose(), + store, ); let repo_ref = ObjectRef { @@ -369,14 +338,9 @@ mod test { // commit bodies - let branch_body = add_body_branch( - branch.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), - ); + let branch_body = add_body_branch(branch.clone(), &repo.store); - let trans_body = add_body_trans(None, &store_repo, &repo_secret, repo.get_storage()); + let trans_body = add_body_trans(None, &repo.store); // create & add commits to store @@ -389,9 +353,7 @@ mod test { vec![], vec![], branch_body.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), + &repo.store, ); log_debug!(">> t1"); @@ -403,9 +365,7 @@ mod test { vec![br.clone()], vec![], trans_body.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), + &repo.store, ); log_debug!(">> t2"); @@ -417,9 +377,7 @@ mod test { vec![br.clone()], vec![], trans_body.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), + &repo.store, ); // log_debug!(">> a3"); @@ -445,9 +403,7 @@ mod test { vec![t2.clone()], vec![t1.clone()], trans_body.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), + &repo.store, ); log_debug!(">> t5"); @@ -459,9 +415,7 @@ mod test { vec![t1.clone(), t2.clone()], vec![t4.clone()], trans_body.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), + &repo.store, ); log_debug!(">> a6"); @@ -473,9 +427,7 @@ mod test { vec![t4.clone()], vec![], trans_body.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), + &repo.store, ); log_debug!(">> a7"); @@ -487,12 +439,10 @@ mod test { vec![t4.clone()], vec![], trans_body.clone(), - &store_repo, - &repo_secret, - repo.get_storage(), + &repo.store, ); - let c7 = Commit::load(a7.clone(), repo.get_storage(), true).unwrap(); + let c7 = Commit::load(a7.clone(), &repo.store, true).unwrap(); c7.verify(&repo).unwrap(); // let mut filter = Filter::new(FilterBuilder::new(10, 0.01)); @@ -517,7 +467,7 @@ mod test { &[t5.id, a6.id, a7.id], &[t5.id], //&their_commits, - repo.get_storage(), + &repo.store, ) .unwrap(); diff --git a/ng-repo/src/commit.rs b/ng-repo/src/commit.rs index dc2ff5e..0a72a5d 100644 --- a/ng-repo/src/commit.rs +++ b/ng-repo/src/commit.rs @@ -20,6 +20,7 @@ use crate::errors::*; use crate::log::*; use crate::object::*; use crate::repo::Repo; +use crate::store::Store; use crate::types::*; use crate::utils::*; use std::collections::HashSet; @@ -124,13 +125,7 @@ impl CommitV0 { }) } - pub fn save( - &mut self, - block_size: usize, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - store: &Box, - ) -> Result { + pub fn save(&mut self, block_size: usize, store: &Store) -> Result { if self.id.is_some() && self.key.is_some() { return Ok(ObjectRef::from_id_key( self.id.unwrap(), @@ -142,8 +137,7 @@ impl CommitV0 { ObjectContent::V0(ObjectContentV0::Commit(Commit::V0(self.clone()))), self.header.clone(), block_size, - store_pubkey, - store_secret, + store, ); self.blocks = obj.save(store)?; if let Some(h) = &mut self.header { @@ -222,9 +216,7 @@ impl Commit { deps: Vec, acks: Vec, body: CommitBody, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - storage: &Box, + store: &Store, ) -> Result { Self::new_with_body_and_save( author_privkey, @@ -240,9 +232,7 @@ impl Commit { vec![], body, 0, - store_pubkey, - store_secret, - storage, + store, ) } @@ -261,14 +251,10 @@ impl Commit { metadata: Vec, body: CommitBody, 
block_size: usize, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - storage: &Box, + store: &Store, ) -> Result { - let (body_ref, mut saved_body) = - body.clone() - .save(block_size, store_pubkey, store_secret, storage)?; - let overlay = store_pubkey.overlay_id_for_read_purpose(); + let (body_ref, mut saved_body) = body.clone().save(block_size, store)?; + let overlay = store.get_store_repo().overlay_id_for_read_purpose(); let mut commit_v0 = CommitV0::new( author_privkey, author_pubkey, @@ -285,7 +271,7 @@ impl Commit { body_ref, )?; commit_v0.body.set(body).unwrap(); - let _commit_ref = commit_v0.save(block_size, store_pubkey, store_secret, storage)?; + let _commit_ref = commit_v0.save(block_size, store)?; commit_v0.blocks.append(&mut saved_body); Ok(Commit::V0(commit_v0)) @@ -302,15 +288,9 @@ impl Commit { } } - pub fn save( - &mut self, - block_size: usize, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - store: &Box, - ) -> Result { + pub fn save(&mut self, block_size: usize, store: &Store) -> Result { match self { - Commit::V0(v0) => v0.save(block_size, store_pubkey, store_secret, store), + Commit::V0(v0) => v0.save(block_size, store), } } @@ -323,7 +303,7 @@ impl Commit { /// Load commit from store pub fn load( commit_ref: ObjectRef, - store: &Box, + store: &Store, with_body: bool, ) -> Result { let (id, key) = (commit_ref.id, commit_ref.key); @@ -354,10 +334,7 @@ impl Commit { } /// Load commit body from store - pub fn load_body( - &self, - store: &Box, - ) -> Result<&CommitBody, CommitLoadError> { + pub fn load_body(&self, store: &Store) -> Result<&CommitBody, CommitLoadError> { if self.body().is_some() { return Ok(self.body().unwrap()); } @@ -455,10 +432,7 @@ impl Commit { } } - pub fn owners_signature_required( - &self, - store: &Box, - ) -> Result { + pub fn owners_signature_required(&self, store: &Store) -> Result { match self.load_body(store)? 
{ CommitBody::V0(CommitBodyV0::UpdateRootBranch(new_root)) => { // load deps (the previous RootBranch commit) @@ -635,7 +609,7 @@ impl Commit { /// or a list of missing blocks pub fn verify_full_object_refs_of_branch_at_commit( &self, - store: &Box, + store: &Store, ) -> Result, CommitLoadError> { //log_debug!(">> verify_full_object_refs_of_branch_at_commit: #{}", self.seq()); @@ -643,7 +617,7 @@ impl Commit { /// and collect missing `ObjectId`s fn load_direct_object_refs( commit: &Commit, - store: &Box, + store: &Store, visited: &mut HashSet, missing: &mut HashSet, ) -> Result<(), CommitLoadError> { @@ -727,7 +701,7 @@ impl Commit { } self.verify_sig(repo)?; self.verify_perm(repo)?; - self.verify_full_object_refs_of_branch_at_commit(repo.get_storage())?; + //self.verify_full_object_refs_of_branch_at_commit(repo.store.unwrap())?; Ok(()) } } @@ -770,16 +744,13 @@ impl CommitBody { pub fn save( self, block_size: usize, - store_pubkey: &StoreRepo, - store_secret: &ReadCapSecret, - store: &Box, + store: &Store, ) -> Result<(ObjectRef, Vec), StorageError> { let obj = Object::new( ObjectContent::V0(ObjectContentV0::CommitBody(self)), None, block_size, - store_pubkey, - store_secret, + store, ); let blocks = obj.save(store)?; Ok((obj.reference().unwrap(), blocks)) @@ -1457,20 +1428,20 @@ mod test { use crate::commit::*; use crate::log::*; - struct Test<'a> { - storage: Box, - } + // struct Test<'a> { + // storage: Box, + // } - impl<'a> Test<'a> { - fn storage(s: impl BlockStorage + 'a) -> Self { - Test { - storage: Box::new(s), - } - } - fn s(&self) -> &Box { - &self.storage - } - } + // impl<'a> Test<'a> { + // fn storage(s: impl BlockStorage + 'a) -> Self { + // Test { + // storage: Box::new(s), + // } + // } + // fn s(&self) -> &Box { + // &self.storage + // } + // } fn test_commit_header_ref_content_fits( obj_refs: Vec, @@ -1510,20 +1481,13 @@ mod test { let max_object_size = 0; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); - let hashmap_storage = HashMapBlockStorage::new(); - let storage = Box::new(hashmap_storage); + let store = Store::dummy_public_v0(); - let commit_ref = commit - .save(max_object_size, &store_repo, &store_secret, &storage) - .expect("save commit"); + let commit_ref = commit.save(max_object_size, &store).expect("save commit"); - let commit_object = Object::load( - commit_ref.id.clone(), - Some(commit_ref.key.clone()), - &storage, - ) - .expect("load object from storage"); + let commit_object = + Object::load(commit_ref.id.clone(), Some(commit_ref.key.clone()), &store) + .expect("load object from storage"); assert_eq!( commit_object.acks(), @@ -1536,7 +1500,7 @@ mod test { assert_eq!(commit_object.all_blocks_len(), expect_blocks_len); - let commit = Commit::load(commit_ref, &storage, false).expect("load commit from storage"); + let commit = Commit::load(commit_ref, &store, false).expect("load commit from storage"); log_debug!("{}", commit); } @@ -1569,22 +1533,16 @@ mod test { let max_object_size = 0; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); - let obj = Object::new( - content.clone(), - None, - max_object_size, - &store_repo, - &store_secret, - ); + let obj = Object::new(content.clone(), None, max_object_size, &store); let hashmap_storage = HashMapBlockStorage::new(); let storage = Box::new(hashmap_storage); - _ = obj.save(&storage).expect("save object"); + _ = obj.save(&store).expect("save object"); - let commit = Commit::load(obj.reference().unwrap(), &storage, false); + let commit = 
Commit::load(obj.reference().unwrap(), &store, false); assert_eq!(commit, Err(CommitLoadError::NotACommitError)); } @@ -1611,9 +1569,7 @@ mod test { let max_object_size = 0; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); - let hashmap_storage = HashMapBlockStorage::new(); - let storage = Box::new(hashmap_storage); + let store = Store::dummy_public_v0(); let commit = Commit::new_with_body_and_save( &priv_key, @@ -1629,15 +1585,13 @@ mod test { metadata, body, max_object_size, - &store_repo, - &store_secret, - &storage, + &store, ) .expect("commit::new_with_body_and_save"); log_debug!("{}", commit); - let commit2 = Commit::load(commit.reference().unwrap(), &storage, true) + let commit2 = Commit::load(commit.reference().unwrap(), &store, true) .expect("load commit with body after save"); log_debug!("{}", commit2); @@ -1676,19 +1630,16 @@ mod test { .unwrap(); log_debug!("{}", commit); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); + let store = Store::dummy_public_v0(); + let repo = Repo::new_with_perms(&[PermissionV0::Create], store); - let repo = - Repo::new_with_member(&pub_key, &pub_key, &[PermissionV0::Create], overlay, t.s()); - - match commit.load_body(repo.get_storage()) { - Ok(_b) => panic!("Body should not exist"), - Err(CommitLoadError::BodyLoadError(missing)) => { - assert_eq!(missing.len(), 1); - } - Err(e) => panic!("Commit load error: {:?}", e), - } + // match commit.load_body(repo.store.unwrap()) { + // Ok(_b) => panic!("Body should not exist"), + // Err(CommitLoadError::BodyLoadError(missing)) => { + // assert_eq!(missing.len(), 1); + // } + // Err(e) => panic!("Commit load error: {:?}", e), + // } commit.verify_sig(&repo).expect("verify signature"); match commit.verify_perm(&repo) { @@ -1699,13 +1650,13 @@ mod test { Err(e) => panic!("Commit verify perm error: {:?}", e), } - match commit.verify_full_object_refs_of_branch_at_commit(repo.get_storage()) { - Ok(_) => panic!("Commit should not be Ok"), - Err(CommitLoadError::BodyLoadError(missing)) => { - assert_eq!(missing.len(), 1); - } - Err(e) => panic!("Commit verify error: {:?}", e), - } + // match commit.verify_full_object_refs_of_branch_at_commit(repo.store.unwrap()) { + // Ok(_) => panic!("Commit should not be Ok"), + // Err(CommitLoadError::BodyLoadError(missing)) => { + // assert_eq!(missing.len(), 1); + // } + // Err(e) => panic!("Commit verify error: {:?}", e), + // } match commit.verify(&repo) { Ok(_) => panic!("Commit should not be Ok"), @@ -1734,9 +1685,7 @@ mod test { let max_object_size = 0; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); + let store = Store::dummy_public_v0(); let commit = Commit::new_with_body_and_save( &priv_key, @@ -1752,23 +1701,15 @@ mod test { metadata, body, max_object_size, - &store_repo, - &store_secret, - t.s(), + &store, ) .expect("commit::new_with_body_and_save"); log_debug!("{}", commit); - let repo = Repo::new_with_member( - &pub_key, - &pub_key, - &[PermissionV0::Create], - store_repo.overlay_id_for_read_purpose(), - t.s(), - ); + let repo = Repo::new_with_perms(&[PermissionV0::Create], store); - commit.load_body(repo.get_storage()).expect("load body"); + commit.load_body(&repo.store).expect("load body"); commit.verify_sig(&repo).expect("verify signature"); commit.verify_perm(&repo).expect("verify perms"); @@ -1777,7 +1718,7 @@ mod test { .expect("verify_perm_creation"); commit - 
.verify_full_object_refs_of_branch_at_commit(repo.get_storage()) + .verify_full_object_refs_of_branch_at_commit(&repo.store) .expect("verify is at root of branch and singleton"); commit.verify(&repo).expect("verify"); @@ -1792,7 +1733,7 @@ mod test { let metadata = Vec::from("some metadata"); //let max_object_size = 0; - //let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + //let store = Store::dummy_public_v0(); let commit = Commit::V0( CommitV0::new_with_invalid_header( @@ -1808,16 +1749,8 @@ mod test { log_debug!("{}", commit); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let repo = Repo::new_with_member( - &pub_key, - &pub_key, - &[PermissionV0::Create], - OverlayId::dummy(), - t.s(), - ); + let store = Store::dummy_public_v0(); + let repo = Repo::new_with_perms(&[PermissionV0::Create], store); assert_eq!( commit.verify(&repo), diff --git a/ng-repo/src/errors.rs b/ng-repo/src/errors.rs index 9ddc986..959dd77 100644 --- a/ng-repo/src/errors.rs +++ b/ng-repo/src/errors.rs @@ -159,6 +159,7 @@ pub enum StorageError { SerializationError, AlreadyExists, DataCorruption, + UnknownColumnFamily, } impl core::fmt::Display for StorageError { diff --git a/ng-repo/src/event.rs b/ng-repo/src/event.rs index 243a83f..65c292a 100644 --- a/ng-repo/src/event.rs +++ b/ng-repo/src/event.rs @@ -11,6 +11,7 @@ use crate::block_storage::*; use crate::errors::*; use crate::object::*; +use crate::store::Store; use crate::types::*; use crate::utils::*; use core::fmt; @@ -55,13 +56,12 @@ impl fmt::Display for EventContentV0 { impl Event { pub fn new<'a>( publisher: &PrivKey, - seq: &mut u64, + seq: u64, commit: &Commit, additional_blocks: &Vec, topic_id: TopicId, - branch_read_cap_secret: ReadCapSecret, topic_priv_key: &BranchWriteCapSecret, - storage: &'a Box, + store: &'a Store, ) -> Result { Ok(Event::V0(EventV0::new( publisher, @@ -69,9 +69,8 @@ impl Event { commit, additional_blocks, topic_id, - branch_read_cap_secret, topic_priv_key, - storage, + store, )?)) } } @@ -79,27 +78,27 @@ impl Event { impl EventV0 { pub fn new<'a>( publisher: &PrivKey, - seq: &mut u64, + seq: u64, commit: &Commit, additional_blocks: &Vec, topic_id: TopicId, - branch_read_cap_secret: ReadCapSecret, topic_priv_key: &BranchWriteCapSecret, - storage: &'a Box, + store: &'a Store, ) -> Result { + let branch_read_cap_secret = &store.get_store_readcap().key; let mut blocks = vec![]; for bid in commit.blocks().iter() { - blocks.push(storage.get(bid)?); + blocks.push(store.get(bid)?); } for bid in additional_blocks.iter() { - blocks.push(storage.get(bid)?); + blocks.push(store.get(bid)?); } - (*seq) += 1; + // (*seq) += 1; let publisher_pubkey = publisher.to_pub(); let event_content = EventContentV0 { topic: topic_id, publisher: PeerId::Forwarded(publisher_pubkey), - seq: *seq, + seq, blocks, file_ids: commit .header() diff --git a/ng-repo/src/file.rs b/ng-repo/src/file.rs index e0d0e3b..603f9f0 100644 --- a/ng-repo/src/file.rs +++ b/ng-repo/src/file.rs @@ -21,6 +21,7 @@ use crate::block_storage::*; use crate::errors::*; use crate::log::*; use crate::object::*; +use crate::store::Store; use crate::types::*; /// File errors @@ -80,21 +81,17 @@ pub struct File<'a> { } impl<'a> File<'a> { - pub fn open( - id: ObjectId, - key: SymKey, - storage: &'a Box, - ) -> Result, FileError> { - let root_block = storage.get(&id)?; + pub fn open(id: ObjectId, key: SymKey, store: &'a Store) -> Result, FileError> { + let root_block = store.get(&id)?; if root_block.children().len() == 2 && 
*root_block.content().commit_header_obj() == CommitHeaderObject::RandomAccess { Ok(File { - internal: Box::new(RandomAccessFile::open(id, key, storage)?), + internal: Box::new(RandomAccessFile::open(id, key, store)?), }) } else { - let obj = Object::load(id, Some(key), storage)?; + let obj = Object::load(id, Some(key), store)?; match obj.content_v0()? { ObjectContentV0::SmallFile(small_file) => Ok(File { internal: Box::new(small_file), @@ -134,7 +131,7 @@ impl ReadFile for SmallFileV0 { /// A RandomAccessFile in memory. This is not used to serialize data pub struct RandomAccessFile<'a> { //storage: Arc<&'a dyn BlockStorage>, - storage: &'a Box, + store: &'a Store, /// accurate once saved or opened meta: RandomAccessFileMeta, @@ -177,7 +174,7 @@ impl<'a> ReadFile for RandomAccessFile<'a> { let mut level_pos = pos; for level in 0..depth { - let tree_block = self.storage.get(¤t_block_id_key.0)?; + let tree_block = self.store.get(¤t_block_id_key.0)?; let (children, content) = tree_block.read(¤t_block_id_key.1)?; if children.len() == 0 || content.len() > 0 { return Err(FileError::BlockDeserializeError); @@ -192,7 +189,7 @@ impl<'a> ReadFile for RandomAccessFile<'a> { level_pos = pos as usize % factor; } - let content_block = self.storage.get(¤t_block_id_key.0)?; + let content_block = self.store.get(¤t_block_id_key.0)?; //log_debug!("CONTENT BLOCK SIZE {}", content_block.size()); let (children, content) = content_block.read(¤t_block_id_key.1)?; @@ -228,7 +225,7 @@ impl<'a> ReadFile for RandomAccessFile<'a> { return Err(FileError::EndOfFile); } let block = &self.blocks[index]; - let content_block = self.storage.get(&block.0)?; + let content_block = self.store.get(&block.0)?; let (children, content) = content_block.read(&block.1)?; if children.len() == 0 && content.len() > 0 { //log_debug!("CONTENT SIZE {}", content.len()); @@ -263,7 +260,7 @@ impl<'a> RandomAccessFile<'a> { conv_key: &[u8; blake3::OUT_LEN], children: Vec, already_existing: &mut HashMap, - storage: &Box, + store: &Store, ) -> Result<(BlockId, BlockKey), StorageError> { let key_hash = blake3::keyed_hash(conv_key, &content); @@ -286,7 +283,7 @@ impl<'a> RandomAccessFile<'a> { let id = block.get_and_save_id(); already_existing.insert(key.clone(), id); //log_debug!("putting *** {}", id); - storage.put(&block)?; + store.put(&block)?; Ok((id, key)) } @@ -294,7 +291,7 @@ impl<'a> RandomAccessFile<'a> { conv_key: &[u8; blake3::OUT_LEN], children: Vec<(BlockId, BlockKey)>, already_existing: &mut HashMap, - storage: &Box, + store: &Store, ) -> Result<(BlockId, BlockKey), StorageError> { let mut ids: Vec = Vec::with_capacity(children.len()); let mut keys: Vec = Vec::with_capacity(children.len()); @@ -305,7 +302,7 @@ impl<'a> RandomAccessFile<'a> { let content = ChunkContentV0::InternalNode(keys); let content_ser = serde_bare::to_vec(&content).unwrap(); - Self::make_block(content_ser, conv_key, ids, already_existing, storage) + Self::make_block(content_ser, conv_key, ids, already_existing, store) } /// Build tree from leaves, returns parent nodes @@ -314,7 +311,7 @@ impl<'a> RandomAccessFile<'a> { leaves: &[(BlockId, BlockKey)], conv_key: &ChaCha20Key, arity: u16, - storage: &'a Box, + store: &Store, ) -> Result<(BlockId, BlockKey), StorageError> { let mut parents: Vec<(BlockId, BlockKey)> = vec![]; let mut chunks = leaves.chunks(arity as usize); @@ -324,19 +321,13 @@ impl<'a> RandomAccessFile<'a> { conv_key, nodes.to_vec(), already_existing, - storage, + store, )?); } //log_debug!("level with {} parents", parents.len()); if 1 < parents.len() 
{ - return Self::make_tree( - already_existing, - parents.as_slice(), - conv_key, - arity, - storage, - ); + return Self::make_tree(already_existing, parents.as_slice(), conv_key, arity, store); } Ok(parents[0].clone()) } @@ -347,7 +338,7 @@ impl<'a> RandomAccessFile<'a> { blocks: &[(BlockId, BlockKey)], meta: &mut RandomAccessFileMeta, conv_key: &ChaCha20Key, - storage: &'a Box, + store: &Store, ) -> Result<((BlockId, BlockKey), (BlockId, BlockKey)), FileError> { let leaf_blocks_nbr = blocks.len(); let arity = meta.arity(); @@ -370,7 +361,7 @@ impl<'a> RandomAccessFile<'a> { blocks[0].clone() } else { // we create the tree - Self::make_tree(already_existing, &blocks, &conv_key, arity, storage)? + Self::make_tree(already_existing, &blocks, &conv_key, arity, store)? }; let meta_object = Object::new_with_convergence_key( @@ -380,7 +371,7 @@ impl<'a> RandomAccessFile<'a> { conv_key, ); //log_debug!("saving meta object"); - _ = meta_object.save(storage)?; + _ = meta_object.save(store)?; // creating the root block that contains as first child the meta_object, and as second child the content_block // it is added to storage in make_parent_block @@ -392,21 +383,20 @@ impl<'a> RandomAccessFile<'a> { content_block.clone(), ], already_existing, - storage, + store, )?; Ok((content_block, root_block)) } /// Creates a new file based on a content that is fully known at the time of creation. + /// /// If you want to stream progressively the content into the new file, you should use new_empty(), write() and save() instead pub fn new_from_slice( content: &[u8], block_size: usize, content_type: String, metadata: Vec, - store: &StoreRepo, - store_secret: &ReadCapSecret, - storage: &'a Box, + store: &'a Store, ) -> Result, FileError> { //let max_block_size = store_max_value_size(); let valid_block_size = store_valid_value_size(block_size) - BLOCK_EXTRA; @@ -415,7 +405,7 @@ impl<'a> RandomAccessFile<'a> { let total_size = content.len() as u64; - let mut conv_key = Object::convergence_key(store, store_secret); + let mut conv_key = Object::convergence_key(store); let mut blocks: Vec<(BlockId, BlockKey)> = vec![]; @@ -430,7 +420,7 @@ impl<'a> RandomAccessFile<'a> { &conv_key, vec![], &mut already_existing, - storage, + store, )?); } assert_eq!( @@ -447,18 +437,13 @@ impl<'a> RandomAccessFile<'a> { depth: 0, }); - let (content_block, root_block) = Self::save_( - &mut already_existing, - &blocks, - &mut meta, - &conv_key, - storage, - )?; + let (content_block, root_block) = + Self::save_(&mut already_existing, &blocks, &mut meta, &conv_key, store)?; conv_key.zeroize(); Ok(Self { - storage, + store, meta, block_contents: HashMap::new(), // not used in this case blocks: vec![], // not used in this case @@ -475,9 +460,7 @@ impl<'a> RandomAccessFile<'a> { block_size: usize, content_type: String, metadata: Vec, - store: &StoreRepo, - store_secret: &ReadCapSecret, - storage: &'a Box, + store: &'a Store, ) -> Self { let valid_block_size = store_valid_value_size(block_size) - BLOCK_EXTRA; @@ -493,14 +476,14 @@ impl<'a> RandomAccessFile<'a> { }); Self { - storage, + store, meta, block_contents: HashMap::new(), blocks: vec![], id: None, key: None, content_block: None, - conv_key: Some(Object::convergence_key(store, store_secret)), + conv_key: Some(Object::convergence_key(store)), remainder: vec![], size: 0, } @@ -535,7 +518,7 @@ impl<'a> RandomAccessFile<'a> { &conv_key, vec![], &mut already_existing, - self.storage, + self.store, )?); } else { // not enough data to create a new block @@ -558,7 +541,7 @@ impl<'a> 
RandomAccessFile<'a> { &conv_key, vec![], &mut already_existing, - self.storage, + self.store, )?); } else { self.remainder = Vec::from(chunck); @@ -585,7 +568,7 @@ impl<'a> RandomAccessFile<'a> { &self.conv_key.unwrap(), vec![], &mut HashMap::new(), - self.storage, + self.store, )?); } @@ -597,7 +580,7 @@ impl<'a> RandomAccessFile<'a> { &self.blocks, &mut self.meta, self.conv_key.as_ref().unwrap(), - self.storage, + self.store, )?; self.conv_key.as_mut().unwrap().zeroize(); @@ -617,10 +600,10 @@ impl<'a> RandomAccessFile<'a> { pub fn open( id: ObjectId, key: SymKey, - storage: &'a Box, + store: &'a Store, ) -> Result, FileError> { // load root block - let root_block = storage.get(&id)?; + let root_block = store.get(&id)?; if root_block.children().len() != 2 || *root_block.content().commit_header_obj() != CommitHeaderObject::RandomAccess @@ -634,7 +617,7 @@ impl<'a> RandomAccessFile<'a> { let meta_object = Object::load( root_sub_blocks[0].0, Some(root_sub_blocks[0].1.clone()), - storage, + store, )?; let meta = match meta_object.content_v0()? { @@ -643,7 +626,7 @@ impl<'a> RandomAccessFile<'a> { }; Ok(RandomAccessFile { - storage, + store, meta, block_contents: HashMap::new(), // not used in this case blocks: vec![], // not used in this case @@ -659,7 +642,7 @@ impl<'a> RandomAccessFile<'a> { pub fn blocks(&self) -> impl Iterator + '_ { self.blocks .iter() - .map(|key| self.storage.get(&key.0).unwrap()) + .map(|key| self.store.get(&key.0).unwrap()) } /// Size once encoded, before deduplication. Only available before save() @@ -674,7 +657,7 @@ impl<'a> RandomAccessFile<'a> { let mut total = 0; self.block_contents .values() - .for_each(|b| total += self.storage.get(b).unwrap().size()); + .for_each(|b| total += self.store.get(b).unwrap().size()); total } @@ -734,20 +717,20 @@ mod test { use std::io::BufReader; use std::io::Read; - struct Test<'a> { - storage: Box, - } - - impl<'a> Test<'a> { - fn storage(s: impl BlockStorage + 'a) -> Self { - Test { - storage: Box::new(s), - } - } - fn s(&self) -> &Box { - &self.storage - } - } + // struct Test<'a> { + // storage: Box, + // } + + // impl<'a> Test<'a> { + // fn storage(s: impl BlockStorage + 'a) -> Self { + // Test { + // storage: Box::new(s), + // } + // } + // fn s(&self) -> &Box { + // &self.store + // } + // } /// Checks that a content that does fit in one block, creates an arity of 0 #[test] @@ -755,15 +738,10 @@ mod test { let block_size = store_max_value_size(); //store_valid_value_size(0) - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - //let storage: Arc<&dyn BlockStorage> = Arc::new(&hashmap_storage); - ////// 1 MB of data! 
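// The file API in this refactor takes a single `&Store` where it previously took a
// (&StoreRepo, &ReadCapSecret, &Box<dyn BlockStorage>) triple; the updated tests in this
// hunk all follow that shape. A minimal sketch of the new call site, assuming the
// test-only Store::dummy_public_v0() backend (it mirrors the calls visible in this diff):
fn sketch_file_roundtrip(content: &[u8], block_size: usize) -> Result<(), FileError> {
    let store = Store::dummy_public_v0();
    let file = RandomAccessFile::new_from_slice(
        content,
        block_size,
        "text/plain".to_string(),
        vec![],
        &store,
    )?;
    // id/key are available once new_from_slice has saved the content into the store
    let _reopened = RandomAccessFile::open(file.id().unwrap(), file.key.unwrap(), &store)?;
    Ok(())
}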
let data_size = block_size - BLOCK_EXTRA; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 1MB of data"); let content: Vec = vec![99; data_size]; @@ -773,9 +751,7 @@ mod test { block_size, "text/plain".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ) .expect("new_from_slice"); log_debug!("{}", file); @@ -818,9 +794,9 @@ mod test { // MAX_ARITY_LEAVES * (MAX_ARITY_LEAVES + 1) * MAX_ARITY_LEAVES + MAX_ARITY_LEAVES + 1 // ); assert_eq!(file.depth(), Ok(0)); - assert_eq!(t.s().len(), Ok(3)); + assert_eq!(store.len(), Ok(3)); - let file = RandomAccessFile::open(id, file.key.unwrap(), t.s()).expect("re open"); + let file = RandomAccessFile::open(id, file.key.unwrap(), &store).expect("re open"); log_debug!("{}", file); @@ -834,13 +810,10 @@ mod test { const MAX_ARITY_LEAVES: usize = 15887; const MAX_DATA_PAYLOAD_SIZE: usize = 1048564; - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - ////// 16 GB of data! let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 16GB of data"); let content: Vec = vec![99; data_size]; @@ -851,9 +824,7 @@ mod test { store_max_value_size(), "text/plain".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ) .expect("new_from_slice"); log_debug!("{}", file); @@ -864,7 +835,7 @@ mod test { assert_eq!(file.depth(), Ok(1)); - assert_eq!(t.s().len(), Ok(4)); + assert_eq!(store.len(), Ok(4)); } /// Checks that a content that doesn't fit in all the children of first level in tree @@ -873,13 +844,10 @@ mod test { const MAX_ARITY_LEAVES: usize = 15887; const MAX_DATA_PAYLOAD_SIZE: usize = 1048564; - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - ////// 16 GB of data! let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE + 1; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 16GB of data"); let content: Vec = vec![99; data_size]; @@ -889,9 +857,7 @@ mod test { store_max_value_size(), "text/plain".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ) .expect("new_from_slice"); log_debug!("{}", file); @@ -903,7 +869,7 @@ mod test { assert_eq!(file.depth().unwrap(), 2); - assert_eq!(t.s().len(), Ok(7)); + assert_eq!(store.len(), Ok(7)); } /// Checks that a content that doesn't fit in all the children of first level in tree @@ -911,14 +877,12 @@ mod test { pub fn test_depth_3() { const MAX_ARITY_LEAVES: usize = 61; const MAX_DATA_PAYLOAD_SIZE: usize = 4084; - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); ////// 900 MB of data! 
let data_size = MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 900MB of data"); let content: Vec = vec![99; data_size]; @@ -928,9 +892,7 @@ mod test { store_valid_value_size(0), "text/plain".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ) .expect("new_from_slice"); log_debug!("{}", file); @@ -965,7 +927,7 @@ mod test { // ); assert_eq!(file.depth().unwrap(), 3); - assert_eq!(t.s().len(), Ok(6)); + assert_eq!(store.len(), Ok(6)); } /// Checks that a content that doesn't fit in all the children of first level in tree @@ -981,10 +943,7 @@ mod test { * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE; - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 55GB of data"); let content: Vec = vec![99; data_size]; @@ -994,9 +953,7 @@ mod test { store_valid_value_size(0), "text/plain".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ) .expect("new_from_slice"); @@ -1009,7 +966,7 @@ mod test { assert_eq!(file.depth().unwrap(), 4); - assert_eq!(t.s().len(), Ok(7)); + assert_eq!(store.len(), Ok(7)); } /// Test async write to a file all at once @@ -1022,19 +979,14 @@ mod test { .read_to_end(&mut img_buffer) .expect("read of test.jpg"); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating file with the JPG content"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_max_value_size(), //store_valid_value_size(0),// "image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); @@ -1097,19 +1049,14 @@ mod test { .read_to_end(&mut img_buffer) .expect("read of test.jpg"); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating file with the JPG content"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_max_value_size(), //store_valid_value_size(0),// "image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); @@ -1174,19 +1121,14 @@ mod test { .read_to_end(&mut img_buffer) .expect("read of test.jpg"); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating file with the JPG content"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_valid_value_size(0), "image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); @@ -1259,19 +1201,14 @@ mod test { let first_block_content = img_buffer[0..4084].to_vec(); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating file with the JPG content"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_valid_value_size(0), 
"image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); @@ -1342,19 +1279,14 @@ mod test { let chunk_nbr = data_size / 5000000; let last_chunk = data_size % 5000000; - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating empty file"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_valid_value_size(0), "image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); @@ -1383,7 +1315,7 @@ mod test { assert_eq!(file.depth().unwrap(), 4); - assert_eq!(t.s().len(), Ok(7)); + assert_eq!(store.len(), Ok(7)); } /// Test open @@ -1396,19 +1328,14 @@ mod test { .read_to_end(&mut img_buffer) .expect("read of test.jpg"); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating file with the JPG content"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_max_value_size(), //store_valid_value_size(0),// "image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); @@ -1419,7 +1346,7 @@ mod test { file.save().expect("save"); - let file2 = RandomAccessFile::open(file.id().unwrap(), file.key.unwrap(), t.s()) + let file2 = RandomAccessFile::open(file.id().unwrap(), file.key.unwrap(), &store) .expect("reopen file"); // this works only because store_max_value_size() is bigger than the actual size of the JPEG file. so it fits in one block. @@ -1459,17 +1386,14 @@ mod test { let content = ObjectContent::new_file_v0_with_content(img_buffer.clone(), "image/jpeg"); let max_object_size = store_max_value_size(); - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); - let mut obj = Object::new(content, None, max_object_size, &store_repo, &store_secret); + let store = Store::dummy_public_v0(); + let mut obj = Object::new(content, None, max_object_size, &store); log_debug!("{}", obj); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); + let _ = obj.save_in_test(&store).expect("save"); - let _ = obj.save_in_test(t.s()).expect("save"); - - let file = File::open(obj.id(), obj.key().unwrap(), t.s()).expect("open"); + let file = File::open(obj.id(), obj.key().unwrap(), &store).expect("open"); let res = file.read(0, len).expect("read all"); @@ -1488,20 +1412,11 @@ mod test { let len = img_buffer.len(); let max_object_size = store_max_value_size(); - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); - - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); + let store = Store::dummy_public_v0(); log_debug!("creating empty file"); - let mut file: RandomAccessFile = RandomAccessFile::new_empty( - max_object_size, - "image/jpeg".to_string(), - vec![], - &store_repo, - &store_secret, - t.s(), - ); + let mut file: RandomAccessFile = + RandomAccessFile::new_empty(max_object_size, "image/jpeg".to_string(), vec![], &store); file.write(&img_buffer).expect("write all"); @@ -1514,7 +1429,7 @@ mod test { let file = File::open( file.id().unwrap(), file.key().as_ref().unwrap().clone(), - t.s(), + &store, ) .expect("open"); @@ -1533,19 +1448,14 @@ mod test { let f = std::fs::File::open("[enter path of a big file 
here]").expect("open of a big file"); let mut reader = BufReader::new(f); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating empty file"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_valid_value_size(0), "image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); @@ -1587,19 +1497,14 @@ mod test { let f = std::fs::File::open("[enter path of a big file here]").expect("open of a big file"); let mut reader = BufReader::new(f); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating empty file"); let mut file: RandomAccessFile = RandomAccessFile::new_empty( store_max_value_size(), "image/jpeg".to_string(), vec![], - &store_repo, - &store_secret, - t.s(), + &store, ); log_debug!("{}", file); diff --git a/ng-repo/src/kcv_storage.rs b/ng-repo/src/kcv_storage.rs index 9ce4a69..a60124c 100644 --- a/ng-repo/src/kcv_storage.rs +++ b/ng-repo/src/kcv_storage.rs @@ -8,6 +8,8 @@ //! KeyColumnValue Store abstraction +use std::collections::HashMap; + use crate::errors::StorageError; // TODO:remove mut on self for trait WriteTransaction methods @@ -15,46 +17,62 @@ use crate::errors::StorageError; pub trait WriteTransaction: ReadTransaction { /// Save a property value to the store. fn put( - &mut self, + &self, prefix: u8, key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError>; /// Replace the property of a key (single value) to the store. fn replace( - &mut self, + &self, prefix: u8, key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError>; /// Delete a property from the store. - fn del(&mut self, prefix: u8, key: &Vec, suffix: Option) -> Result<(), StorageError>; + fn del( + &self, + prefix: u8, + key: &Vec, + suffix: Option, + family: &Option, + ) -> Result<(), StorageError>; /// Delete all properties of a key from the store. fn del_all( - &mut self, + &self, prefix: u8, key: &Vec, all_suffixes: &[u8], + family: &Option, ) -> Result<(), StorageError>; /// Delete a specific value for a property from the store. fn del_property_value( - &mut self, + &self, prefix: u8, key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError>; } pub trait ReadTransaction { /// Load a property from the store. - fn get(&self, prefix: u8, key: &Vec, suffix: Option) -> Result, StorageError>; + fn get( + &self, + prefix: u8, + key: &Vec, + suffix: Option, + family: &Option, + ) -> Result, StorageError>; /// Load all the values of a property from the store. #[deprecated( @@ -65,8 +83,17 @@ pub trait ReadTransaction { prefix: u8, key: &Vec, suffix: Option, + family: &Option, ) -> Result>, StorageError>; + fn get_all_properties_of_key( + &self, + prefix: u8, + key: Vec, + properties: Vec, + family: &Option, + ) -> Result>, StorageError>; + /// Check if a specific value exists for a property from the store. fn has_property_value( &self, @@ -74,6 +101,7 @@ pub trait ReadTransaction { key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError>; /// retrieves all the keys and values with the given prefix and key_size. 
if no suffix is specified, then all (including none) the suffices are returned @@ -83,45 +111,46 @@ pub trait ReadTransaction { key_size: usize, key_prefix: Vec, suffix: Option, + family: &Option, ) -> Result, Vec)>, StorageError>; } -pub trait KCVStore: ReadTransaction { +pub trait KCVStore: WriteTransaction { fn write_transaction( &self, method: &mut dyn FnMut(&mut dyn WriteTransaction) -> Result<(), StorageError>, ) -> Result<(), StorageError>; - /// Save a property value to the store. - fn put( - &self, - prefix: u8, - key: &Vec, - suffix: Option, - value: Vec, - ) -> Result<(), StorageError>; - - /// Replace the property of a key (single value) to the store. - fn replace( - &self, - prefix: u8, - key: &Vec, - suffix: Option, - value: Vec, - ) -> Result<(), StorageError>; - - /// Delete a property from the store. - fn del(&self, prefix: u8, key: &Vec, suffix: Option) -> Result<(), StorageError>; - - /// Delete all properties of a key from the store. - fn del_all(&self, prefix: u8, key: &Vec, all_suffixes: &[u8]) -> Result<(), StorageError>; - - /// Delete a specific value for a property from the store. - fn del_property_value( - &self, - prefix: u8, - key: &Vec, - suffix: Option, - value: Vec, - ) -> Result<(), StorageError>; + // /// Save a property value to the store. + // fn put( + // &self, + // prefix: u8, + // key: &Vec, + // suffix: Option, + // value: Vec, + // ) -> Result<(), StorageError>; + + // /// Replace the property of a key (single value) to the store. + // fn replace( + // &self, + // prefix: u8, + // key: &Vec, + // suffix: Option, + // value: Vec, + // ) -> Result<(), StorageError>; + + // /// Delete a property from the store. + // fn del(&self, prefix: u8, key: &Vec, suffix: Option) -> Result<(), StorageError>; + + // /// Delete all properties of a key from the store. + // fn del_all(&self, prefix: u8, key: &Vec, all_suffixes: &[u8]) -> Result<(), StorageError>; + + // /// Delete a specific value for a property from the store. 
+ // fn del_property_value( + // &self, + // prefix: u8, + // key: &Vec, + // suffix: Option, + // value: Vec, + // ) -> Result<(), StorageError>; } diff --git a/ng-repo/src/lib.rs b/ng-repo/src/lib.rs index 2982d87..41ef77e 100644 --- a/ng-repo/src/lib.rs +++ b/ng-repo/src/lib.rs @@ -24,6 +24,8 @@ pub mod repo; pub mod site; +pub mod store; + pub mod event; pub mod utils; diff --git a/ng-repo/src/object.rs b/ng-repo/src/object.rs index 3fec9de..4671965 100644 --- a/ng-repo/src/object.rs +++ b/ng-repo/src/object.rs @@ -21,6 +21,7 @@ use zeroize::Zeroize; use crate::block_storage::*; use crate::errors::*; use crate::log::*; +use crate::store::Store; use crate::types::*; pub const BLOCK_EXTRA: usize = 12; // 8 is the smallest extra + BLOCK_MAX_DATA_EXTRA @@ -60,10 +61,14 @@ pub struct Object { impl Object { pub(crate) fn convergence_key( - store_pubkey: &StoreRepo, - store_readcap_secret: &ReadCapSecret, + /*store_pubkey: &StoreRepo, + store_readcap_secret: &ReadCapSecret,*/ + store: &Store, ) -> [u8; blake3::OUT_LEN] { - let mut key_material = match (*store_pubkey.repo_id(), store_readcap_secret.clone()) { + let mut key_material = match ( + *store.get_store_repo().repo_id(), + store.get_store_readcap_secret().clone(), + ) { (PubKey::Ed25519PubKey(pubkey), SymKey::ChaCha20Key(secret)) => { [pubkey, secret].concat() } @@ -271,10 +276,9 @@ impl Object { content: ObjectContent, header: Option, block_size: usize, - store: &StoreRepo, - store_secret: &ReadCapSecret, + store: &Store, ) -> Object { - let mut conv_key = Self::convergence_key(store, store_secret); + let mut conv_key = Self::convergence_key(store); let res = Self::new_with_convergence_key(content, header, block_size, &conv_key); conv_key.zeroize(); res @@ -424,11 +428,11 @@ impl Object { pub fn load( id: ObjectId, key: Option, - store: &Box, + store: &Store, ) -> Result { fn load_tree( parents: Vec, - store: &Box, + store: &Store, blocks: &mut Vec, missing: &mut Vec, block_contents: &mut HashMap, @@ -517,10 +521,7 @@ impl Object { } /// Save blocks of the object and the blocks of the header object in the store - pub fn save( - &self, - store: &Box, - ) -> Result, StorageError> { + pub fn save(&self, store: &Store) -> Result, StorageError> { let mut deduplicated: HashSet = HashSet::new(); //.chain(self.header_blocks.iter()) for block_id in self.blocks.iter() { @@ -544,10 +545,7 @@ impl Object { } #[cfg(test)] - pub fn save_in_test( - &mut self, - store: &Box, - ) -> Result, StorageError> { + pub fn save_in_test(&mut self, store: &Store) -> Result, StorageError> { assert!(self.already_saved == false); self.already_saved = true; @@ -992,15 +990,9 @@ mod test { content: vec![], }); let content = ObjectContent::V0(ObjectContentV0::SmallFile(file)); - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); let header = CommitHeader::new_with_acks([ObjectId::dummy()].to_vec()); - let _obj = Object::new( - content, - header, - store_max_value_size(), - &store_repo, - &store_secret, - ); + let _obj = Object::new(content, header, store_max_value_size(), &store); } /// Test JPEG file @@ -1015,8 +1007,8 @@ mod test { let content = ObjectContent::new_file_v0_with_content(img_buffer, "image/jpeg"); let max_object_size = store_max_value_size(); - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); - let obj = Object::new(content, None, max_object_size, &store_repo, &store_secret); + let store = Store::dummy_public_v0(); + let obj = Object::new(content, None, max_object_size, &store); 
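// Object::new now takes the Store directly and derives the convergence key internally
// from the store's repo id and read-capability secret (see convergence_key above), so
// callers no longer thread a (StoreRepo, ReadCapSecret) pair around. A small round-trip
// sketch, assuming the same dummy store these tests use:
fn sketch_object_roundtrip(content: ObjectContent, store: &Store) {
    let mut obj = Object::new(content, None, store_max_value_size(), store);
    let _block_ids = obj.save_in_test(store).expect("save");
    // reload through the same store, as File::open does above
    let _reloaded = Object::load(obj.id(), obj.key(), store).expect("load");
}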
log_debug!("{}", obj); @@ -1046,15 +1038,9 @@ mod test { //let header = CommitHeader::new_with_acks(acks.clone()); let max_object_size = 0; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); - let mut obj = Object::new( - content.clone(), - None, - max_object_size, - &store_repo, - &store_secret, - ); + let mut obj = Object::new(content.clone(), None, max_object_size, &store); log_debug!("{}", obj); @@ -1067,7 +1053,6 @@ mod test { } Err(e) => panic!("Object parse error: {:?}", e), } - let store = Box::new(HashMapBlockStorage::new()); obj.save_in_test(&store).expect("Object save error"); @@ -1101,7 +1086,7 @@ mod test { /// Checks that a content that fits the root node, will not be chunked into children nodes #[test] pub fn test_depth_0() { - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); let empty_file = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 { @@ -1168,13 +1153,7 @@ mod test { // let content_ser = serde_bare::to_vec(&content).unwrap(); // log_debug!("content len for 2*524277: {}", content_ser.len()); - let empty_obj = Object::new( - empty_file, - None, - store_max_value_size(), - &store_repo, - &store_secret, - ); + let empty_obj = Object::new(empty_file, None, store_max_value_size(), &store); let empty_file_size = empty_obj.size(); log_debug!("empty file size: {}", empty_file_size); @@ -1191,13 +1170,7 @@ mod test { let content_ser = serde_bare::to_vec(&content).unwrap(); log_debug!("content len: {}", content_ser.len()); - let object = Object::new( - content, - None, - store_max_value_size(), - &store_repo, - &store_secret, - ); + let object = Object::new(content, None, store_max_value_size(), &store); log_debug!("{}", object); log_debug!("object size: {}", object.size()); @@ -1217,7 +1190,7 @@ mod test { ////// 16 GB of data! let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE - 10; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 16GB of data"); let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 { content_type: "".into(), @@ -1227,13 +1200,7 @@ mod test { //let content_ser = serde_bare::to_vec(&content).unwrap(); //log_debug!("content len: {}", content_ser.len()); log_debug!("creating object with that data"); - let object = Object::new( - content, - None, - store_max_value_size(), - &store_repo, - &store_secret, - ); + let object = Object::new(content, None, store_max_value_size(), &store); log_debug!("{}", object); let obj_size = object.size(); @@ -1260,7 +1227,7 @@ mod test { ////// 16 GB of data! 
let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 16GB of data"); let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 { content_type: "".into(), @@ -1270,13 +1237,7 @@ mod test { //let content_ser = serde_bare::to_vec(&content).unwrap(); //log_debug!("content len: {}", content_ser.len()); log_debug!("creating object with that data"); - let object = Object::new( - content, - None, - store_max_value_size(), - &store_repo, - &store_secret, - ); + let object = Object::new(content, None, store_max_value_size(), &store); log_debug!("{}", object); let obj_size = object.size(); @@ -1304,7 +1265,7 @@ mod test { let data_size = MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE - 10; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 900MB of data"); let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 { content_type: "".into(), @@ -1314,13 +1275,7 @@ mod test { //let content_ser = serde_bare::to_vec(&content).unwrap(); //log_debug!("content len: {}", content_ser.len()); log_debug!("creating object with that data"); - let object = Object::new( - content, - None, - store_valid_value_size(0), - &store_repo, - &store_secret, - ); + let object = Object::new(content, None, store_valid_value_size(0), &store); log_debug!("{}", object); let obj_size = object.size(); @@ -1362,7 +1317,7 @@ mod test { * MAX_DATA_PAYLOAD_SIZE - 12; - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); + let store = Store::dummy_public_v0(); log_debug!("creating 52GB of data"); let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 { content_type: "".into(), @@ -1372,13 +1327,7 @@ mod test { //let content_ser = serde_bare::to_vec(&content).unwrap(); //log_debug!("content len: {}", content_ser.len()); log_debug!("creating object with that data"); - let object = Object::new( - content, - None, - store_valid_value_size(0), - &store_repo, - &store_secret, - ); + let object = Object::new(content, None, store_valid_value_size(0), &store); log_debug!("{}", object); let obj_size = object.size(); diff --git a/ng-repo/src/repo.rs b/ng-repo/src/repo.rs index e86053b..1b5658c 100644 --- a/ng-repo/src/repo.rs +++ b/ng-repo/src/repo.rs @@ -14,17 +14,15 @@ use crate::errors::*; use crate::event::*; use crate::log::*; use crate::object::Object; +use crate::store::Store; use crate::types::*; use crate::utils::generate_keypair; use crate::utils::sign; use core::fmt; -use rand::prelude::*; use std::collections::HashMap; use std::collections::HashSet; -use threshold_crypto::{SecretKeySet, SecretKeyShare}; - impl RepositoryV0 { pub fn new(id: &PubKey, metadata: &Vec) -> RepositoryV0 { RepositoryV0 { @@ -76,20 +74,20 @@ impl UserInfo { } /// In memory Repository representation. 
With helper functions that access the underlying UserStore and keeps proxy of the values -pub struct Repo<'a> { +pub struct Repo { + pub id: RepoId, /// Repo definition pub repo_def: Repository, pub signer: Option, pub members: HashMap, - - storage: &'a Box, + pub store: Box, } -impl<'a> fmt::Display for Repo<'a> { +impl fmt::Display for Repo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "====== Repo ======")?; + writeln!(f, "====== Repo ====== {}", self.id)?; write!(f, "== repo_def: {}", self.repo_def)?; @@ -103,364 +101,12 @@ impl<'a> fmt::Display for Repo<'a> { } } -impl<'a> Repo<'a> { - /// returns the Repo and the last seq_num of the peer - pub fn new_default( - creator: &UserId, - creator_priv_key: &PrivKey, - publisher_peer: &PrivKey, - peer_last_seq_num: &mut u64, - store_repo: &StoreRepo, - store_secret: &ReadCapSecret, - storage: &'a Box, - ) -> Result<(Self, Vec), NgError> { - let mut events = Vec::with_capacity(6); - - // creating the Repository commit - - let (repo_priv_key, repo_pub_key) = generate_keypair(); - - //let overlay = store_repo.overlay_id_for_read_purpose(); - - let repository = Repository::V0(RepositoryV0 { - id: repo_pub_key, - verification_program: vec![], - creator: None, - metadata: vec![], - }); - - let repository_commit_body = CommitBody::V0(CommitBodyV0::Repository(repository.clone())); - - let repository_commit = Commit::new_with_body_acks_deps_and_save( - &repo_priv_key, - &repo_pub_key, - repo_pub_key, - QuorumType::NoSigning, - vec![], - vec![], - repository_commit_body, - &store_repo, - &store_secret, - storage, - )?; - - log_debug!("REPOSITORY COMMIT {}", repository_commit); - - let repository_commit_ref = repository_commit.reference().unwrap(); - - let (topic_priv_key, topic_pub_key) = generate_keypair(); - - // creating the RootBranch commit, acks to Repository commit - - let repo_write_cap_secret = SymKey::random(); - - let root_branch_commit_body = - CommitBody::V0(CommitBodyV0::RootBranch(RootBranch::V0(RootBranchV0 { - id: repo_pub_key, - repo: repository_commit_ref.clone(), - store: store_repo.into(), - store_sig: None, //TODO: the store signature - topic: topic_pub_key, - topic_privkey: Branch::encrypt_topic_priv_key( - &topic_priv_key, - topic_pub_key, - repo_pub_key, - &repo_write_cap_secret, - ), - inherit_perms_users_and_quorum_from_store: None, - quorum: None, - reconciliation_interval: RelTime::None, - owners: vec![creator.clone()], - metadata: vec![], - }))); - - let root_branch_commit = Commit::new_with_body_acks_deps_and_save( - &repo_priv_key, - &repo_pub_key, - repo_pub_key, - QuorumType::NoSigning, - vec![], - vec![repository_commit_ref.clone()], - root_branch_commit_body, - &store_repo, - &store_secret, - storage, - )?; - - log_debug!("ROOT_BRANCH COMMIT {}", root_branch_commit); - - // adding the 2 events for the Repository and Rootbranch commits - - //peer_last_seq_num += 1; - events.push(Event::new( - publisher_peer, - peer_last_seq_num, - &repository_commit, - &vec![], - topic_pub_key, - root_branch_commit.key().unwrap(), - &topic_priv_key, - storage, - )?); - - //peer_last_seq_num += 1; - events.push(Event::new( - publisher_peer, - peer_last_seq_num, - &root_branch_commit, - &vec![], - topic_pub_key, - root_branch_commit.key().unwrap(), - &topic_priv_key, - storage, - )?); - - // creating the main branch - - let (main_branch_priv_key, main_branch_pub_key) = generate_keypair(); - - let (main_branch_topic_priv_key, main_branch_topic_pub_key) = generate_keypair(); - - let main_branch_commit_body = 
CommitBody::V0(CommitBodyV0::Branch(Branch::V0(BranchV0 { - id: main_branch_pub_key, - content_type: BranchContentType::None, - repo: repository_commit_ref.clone(), - root_branch_readcap_id: root_branch_commit.id().unwrap(), - topic: main_branch_topic_pub_key, - topic_privkey: Branch::encrypt_topic_priv_key( - &main_branch_topic_priv_key, - main_branch_topic_pub_key, - main_branch_pub_key, - &repo_write_cap_secret, - ), - metadata: vec![], - }))); - - let main_branch_commit = Commit::new_with_body_acks_deps_and_save( - &main_branch_priv_key, - &main_branch_pub_key, - main_branch_pub_key, - QuorumType::NoSigning, - vec![], - vec![], - main_branch_commit_body, - &store_repo, - &store_secret, - storage, - )?; - - log_debug!("MAIN BRANCH COMMIT {}", main_branch_commit); - - // adding the event for the Branch commit - - // peer_last_seq_num += 1; - events.push(Event::new( - publisher_peer, - peer_last_seq_num, - &main_branch_commit, - &vec![], - main_branch_topic_pub_key, - main_branch_commit.key().unwrap(), - &main_branch_topic_priv_key, - storage, - )?); - - // creating the AddBranch commit (on root_branch), deps to the RootBranch commit - // author is the owner - - let add_branch_commit_body = - CommitBody::V0(CommitBodyV0::AddBranch(AddBranch::V0(AddBranchV0 { - branch_type: BranchType::Main, - topic_id: main_branch_topic_pub_key, - branch_read_cap: main_branch_commit.reference().unwrap(), - }))); - - let add_branch_commit = Commit::new_with_body_acks_deps_and_save( - creator_priv_key, - creator, - repo_pub_key, - QuorumType::Owners, - vec![root_branch_commit.reference().unwrap()], - vec![], - add_branch_commit_body, - &store_repo, - &store_secret, - storage, - )?; - - log_debug!("ADD_BRANCH COMMIT {}", add_branch_commit); - - // TODO: optional AddMember and AddPermission, that should be added as deps to the SynSignature below (and to the commits of the SignatureContent) - // using the creator as author (and incrementing their peer's seq_num) - - // preparing the threshold keys for the unique owner - let mut rng = rand::thread_rng(); - let sk_set = SecretKeySet::random(0, &mut rng); - let pk_set = sk_set.public_keys(); - - let sk_share = sk_set.secret_key_share(0); - - // creating signature for RootBranch, AddBranch and Branch commits - // signed with owner threshold signature (threshold = 0) - - let signature_content = SignatureContent::V0(SignatureContentV0 { - commits: vec![ - root_branch_commit.id().unwrap(), - add_branch_commit.id().unwrap(), - main_branch_commit.id().unwrap(), - ], - }); - - let signature_content_ser = serde_bare::to_vec(&signature_content).unwrap(); - let sig_share = sk_share.sign(signature_content_ser); - let sig = pk_set - .combine_signatures([(0, &sig_share)]) - .map_err(|_| NgError::IncompleteSignature)?; - - let threshold_sig = ThresholdSignatureV0::Owners((sig)); - - // creating root certificate of the repo - - let cert_content = CertificateContentV0 { - previous: repository_commit_ref, - readcap_id: root_branch_commit.id().unwrap(), - owners_pk_set: pk_set.public_key(), - orders_pk_sets: OrdersPublicKeySetsV0::None, - }; - - // signing the root certificate - let cert_content_ser = serde_bare::to_vec(&cert_content).unwrap(); - let sig = sign(&repo_priv_key, &repo_pub_key, &cert_content_ser)?; - let cert_sig = CertificateSignatureV0::Repo(sig); - - let cert = Certificate::V0(CertificateV0 { - content: cert_content, - sig: cert_sig, - }); - // saving the certificate - let cert_object = Object::new( - ObjectContent::V0(ObjectContentV0::Certificate(cert)), - None, 
- 0, - &store_repo, - &store_secret, - ); - let mut cert_obj_blocks = cert_object.save(storage)?; - - // finally getting the signature: - - let signature = Signature::V0(SignatureV0 { - content: signature_content, - threshold_sig, - certificate_ref: cert_object.reference().unwrap(), - }); - - // saving the signature - let sig_object = Object::new( - ObjectContent::V0(ObjectContentV0::Signature(signature)), - None, - 0, - &store_repo, - &store_secret, - ); - let mut sig_obj_blocks = sig_object.save(storage)?; - - // keeping the Secret Key Share of the owner - let signer_cap = SignerCap { - repo: repo_pub_key, - epoch: root_branch_commit.id().unwrap(), - owner: Some(threshold_crypto::serde_impl::SerdeSecret(sk_share)), - total_order: None, - partial_order: None, - }; - - let sync_signature = SyncSignature::V0(sig_object.reference().unwrap()); - - // creating the SyncSignature for the root_branch with deps to the AddBranch and acks to the RootBranch commit as it is its direct causal future. - let sync_sig_commit_body = CommitBody::V0(CommitBodyV0::SyncSignature(sync_signature)); - - let sync_sig_on_root_branch_commit = Commit::new_with_body_acks_deps_and_save( - creator_priv_key, - creator, - repo_pub_key, - QuorumType::IamTheSignature, - vec![add_branch_commit.reference().unwrap()], - vec![root_branch_commit.reference().unwrap()], - sync_sig_commit_body.clone(), - &store_repo, - &store_secret, - storage, - )?; - - // adding the event for the sync_sig_on_root_branch_commit - - let mut additional_blocks = Vec::with_capacity( - cert_obj_blocks.len() + sig_obj_blocks.len() + add_branch_commit.blocks().len(), - ); - additional_blocks.extend(cert_obj_blocks.iter()); - additional_blocks.extend(sig_obj_blocks.iter()); - additional_blocks.extend(add_branch_commit.blocks().iter()); - - //peer_last_seq_num += 1; - events.push(Event::new( - publisher_peer, - peer_last_seq_num, - &sync_sig_on_root_branch_commit, - &additional_blocks, - topic_pub_key, - root_branch_commit.key().unwrap(), - &topic_priv_key, - storage, - )?); - - // creating the SyncSignature for the main branch with deps to the Branch commit and acks also to this commit as it is its direct causal future. 
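// The repository bootstrap removed in this hunk moves to the new Store type
// (ng-repo/src/store.rs, added below): Store::create_repo_default() consumes the boxed
// store and returns the Repo together with the commits and their additional blocks,
// leaving Event construction to the caller. A rough call shape, assuming an owner
// keypair is already at hand:
fn sketch_bootstrap(
    store: Box<Store>,
    creator: &UserId,
    creator_priv_key: &PrivKey,
) -> Result<Repo, NgError> {
    let (repo, commits_and_blocks) = store.create_repo_default(creator, creator_priv_key)?;
    // each Commit is paired with the extra blocks it needs (certificate and signature
    // objects); the verifier/broker layer is expected to turn these into Events.
    let _ = commits_and_blocks;
    Ok(repo)
}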
- - let sync_sig_on_main_branch_commit = Commit::new_with_body_acks_deps_and_save( - creator_priv_key, - creator, - main_branch_pub_key, - QuorumType::IamTheSignature, - vec![main_branch_commit.reference().unwrap()], - vec![main_branch_commit.reference().unwrap()], - sync_sig_commit_body, - &store_repo, - &store_secret, - storage, - )?; - - // adding the event for the sync_sig_on_main_branch_commit - - let mut additional_blocks = - Vec::with_capacity(cert_obj_blocks.len() + sig_obj_blocks.len()); - additional_blocks.append(&mut cert_obj_blocks); - additional_blocks.append(&mut sig_obj_blocks); - - // peer_last_seq_num += 1; - events.push(Event::new( - publisher_peer, - peer_last_seq_num, - &sync_sig_on_main_branch_commit, - &additional_blocks, - main_branch_topic_pub_key, - main_branch_commit.key().unwrap(), - &main_branch_topic_priv_key, - storage, - )?); - - // TODO: add the CertificateRefresh event on main branch - - // += 1; - - // preparing the Repo - - let repo = Repo { - repo_def: repository, - signer: Some(signer_cap), - members: HashMap::new(), - storage, - }; - - Ok((repo, events)) +impl Repo { + #[cfg(test)] + #[allow(deprecated)] + pub fn new_with_perms(perms: &[PermissionV0], store: Box) -> Self { + let pub_key = PubKey::nil(); + Self::new_with_member(&pub_key, &pub_key, perms, OverlayId::dummy(), store) } pub fn new_with_member( @@ -468,7 +114,7 @@ impl<'a> Repo<'a> { member: &UserId, perms: &[PermissionV0], overlay: OverlayId, - storage: &'a Box, + store: Box, ) -> Self { let mut members = HashMap::new(); let permissions = HashMap::from_iter( @@ -487,20 +133,21 @@ impl<'a> Repo<'a> { }, ); Self { + id: id.clone(), repo_def: Repository::new(id, &vec![]), members, - storage, + store, signer: None, } } pub fn verify_permission(&self, commit: &Commit) -> Result<(), NgError> { let content_author = commit.content_v0().author; - let body = commit.load_body(&self.storage)?; - match self.members.get(&content_author) { - Some(info) => return info.has_any_perm(&body.required_permission()), - None => {} - } + // let body = commit.load_body(self.store.unwrap())?; + // match self.members.get(&content_author) { + // Some(info) => return info.has_any_perm(&body.required_permission()), + // None => {} + // } Err(NgError::PermissionDenied) } @@ -511,65 +158,7 @@ impl<'a> Repo<'a> { } } - pub fn get_storage(&self) -> &Box { - self.storage - } -} - -#[cfg(test)] -mod test { - - use crate::object::*; - use crate::repo::*; - - struct Test<'a> { - storage: Box, - } - - impl<'a> Test<'a> { - fn storage(s: impl BlockStorage + 'a) -> Self { - Test { - storage: Box::new(s), - } - } - fn s(&self) -> &Box { - &self.storage - } - } - - #[test] - pub fn test_new_repo_default() { - let (creator_priv_key, creator_pub_key) = generate_keypair(); - - let (publisher_privkey, publisher_pubkey) = generate_keypair(); - let publisher_peer = PeerId::Forwarded(publisher_pubkey); - - let mut peer_last_seq_num = 10; - - let (store_repo, store_secret) = StoreRepo::dummy_public_v0(); - let hashmap_storage = HashMapBlockStorage::new(); - let t = Test::storage(hashmap_storage); - - let (repo, events) = Repo::new_default( - &creator_pub_key, - &creator_priv_key, - &publisher_privkey, - &mut peer_last_seq_num, - &store_repo, - &store_secret, - t.s(), - ) - .expect("new_default"); - - log_debug!("REPO OBJECT {}", repo); - - log_debug!("events: {}\n", events.len()); - let mut i = 0; - for e in events { - log_debug!("========== EVENT {:03}: {}", i, e); - i += 1; - } - - assert_eq!(peer_last_seq_num, 15); - } + // pub(crate) fn 
get_store(&self) -> &Store { + // self.store.unwrap() + // } } diff --git a/ng-repo/src/store.rs b/ng-repo/src/store.rs new file mode 100644 index 0000000..ef95168 --- /dev/null +++ b/ng-repo/src/store.rs @@ -0,0 +1,474 @@ +/* + * Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers + * All rights reserved. + * Licensed under the Apache License, Version 2.0 + * + * or the MIT license , + * at your option. All files in the project carrying such + * notice may not be copied, modified, or distributed except + * according to those terms. +*/ + +//! Store of a Site, or of a Group or Dialog + +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; + +use crate::block_storage::BlockStorage; +use crate::errors::{NgError, StorageError}; +use crate::object::Object; +use crate::repo::Repo; +use crate::types::*; +use crate::utils::{generate_keypair, sign, verify}; + +use crate::log::*; + +use rand::prelude::*; + +use threshold_crypto::{SecretKeySet, SecretKeyShare}; + +pub struct Store { + store_repo: StoreRepo, + store_readcap: ReadCap, + overlay_id: OverlayId, + storage: Arc>, + //repos: HashMap, +} +impl Store { + pub fn get_store_repo(&self) -> &StoreRepo { + &self.store_repo + } + + pub fn get_store_readcap(&self) -> &ReadCap { + &self.store_readcap + } + + pub fn get_store_readcap_secret(&self) -> &ReadCapSecret { + &self.store_readcap.key + } + + /// Load a block from the storage. + pub fn get(&self, id: &BlockId) -> Result { + self.storage + .read() + .map_err(|_| StorageError::BackendError)? + .get(&self.overlay_id, id) + } + + /// Save a block to the storage. + pub fn put(&self, block: &Block) -> Result { + self.storage + .write() + .map_err(|_| StorageError::BackendError)? + .put(&self.overlay_id, block) + } + + /// Delete a block from the storage. + pub fn del(&self, id: &BlockId) -> Result { + self.storage + .write() + .map_err(|_| StorageError::BackendError)? + .del(&self.overlay_id, id) + } + + /// number of Blocks in the storage + pub fn len(&self) -> Result { + self.storage + .read() + .map_err(|_| StorageError::BackendError)? 
+ .len() + } + + pub fn create_repo_default( + self: Box, + creator: &UserId, + creator_priv_key: &PrivKey, + ) -> Result<(Repo, Vec<(Commit, Vec)>), NgError> { + let mut events = Vec::with_capacity(6); + + // creating the Repository commit + + let (repo_priv_key, repo_pub_key) = generate_keypair(); + + //let overlay = store_repo.overlay_id_for_read_purpose(); + + let repository = Repository::V0(RepositoryV0 { + id: repo_pub_key, + verification_program: vec![], + creator: None, + metadata: vec![], + }); + + let repository_commit_body = CommitBody::V0(CommitBodyV0::Repository(repository.clone())); + + let repository_commit = Commit::new_with_body_acks_deps_and_save( + &repo_priv_key, + &repo_pub_key, + repo_pub_key, + QuorumType::NoSigning, + vec![], + vec![], + repository_commit_body, + &self, + )?; + + log_debug!("REPOSITORY COMMIT {}", repository_commit); + + let repository_commit_ref = repository_commit.reference().unwrap(); + + let (topic_priv_key, topic_pub_key) = generate_keypair(); + + // creating the RootBranch commit, acks to Repository commit + + let repo_write_cap_secret = SymKey::random(); + + let root_branch_commit_body = + CommitBody::V0(CommitBodyV0::RootBranch(RootBranch::V0(RootBranchV0 { + id: repo_pub_key, + repo: repository_commit_ref.clone(), + store: (&self.store_repo).into(), + store_sig: None, //TODO: the store signature + topic: topic_pub_key, + topic_privkey: Branch::encrypt_topic_priv_key( + &topic_priv_key, + topic_pub_key, + repo_pub_key, + &repo_write_cap_secret, + ), + inherit_perms_users_and_quorum_from_store: None, + quorum: None, + reconciliation_interval: RelTime::None, + owners: vec![creator.clone()], + metadata: vec![], + }))); + + let root_branch_commit = Commit::new_with_body_acks_deps_and_save( + &repo_priv_key, + &repo_pub_key, + repo_pub_key, + QuorumType::NoSigning, + vec![], + vec![repository_commit_ref.clone()], + root_branch_commit_body, + &self, + )?; + + log_debug!("ROOT_BRANCH COMMIT {}", root_branch_commit); + let root_branch_readcap = root_branch_commit.reference().unwrap(); + let root_branch_readcap_id = root_branch_readcap.id; + // adding the 2 events for the Repository and Rootbranch commits + + //peer_last_seq_num += 1; + events.push((repository_commit, vec![])); + // events.push(Event::new( + // publisher_peer, + // peer_last_seq_num, + // &repository_commit, + // &vec![], + // topic_pub_key, + // root_branch_commit.key().unwrap(), + // &topic_priv_key, + // store, + // )?); + + //peer_last_seq_num += 1; + events.push((root_branch_commit, vec![])); + // events.push(Event::new( + // publisher_peer, + // peer_last_seq_num, + // &root_branch_commit, + // &vec![], + // topic_pub_key, + // root_branch_commit.key().unwrap(), + // &topic_priv_key, + // storage, + // )?); + + // creating the main branch + + let (main_branch_priv_key, main_branch_pub_key) = generate_keypair(); + + let (main_branch_topic_priv_key, main_branch_topic_pub_key) = generate_keypair(); + + let main_branch_commit_body = CommitBody::V0(CommitBodyV0::Branch(Branch::V0(BranchV0 { + id: main_branch_pub_key, + content_type: BranchContentType::None, + repo: repository_commit_ref.clone(), + root_branch_readcap_id, + topic: main_branch_topic_pub_key, + topic_privkey: Branch::encrypt_topic_priv_key( + &main_branch_topic_priv_key, + main_branch_topic_pub_key, + main_branch_pub_key, + &repo_write_cap_secret, + ), + metadata: vec![], + }))); + + let main_branch_commit = Commit::new_with_body_acks_deps_and_save( + &main_branch_priv_key, + &main_branch_pub_key, + 
main_branch_pub_key, + QuorumType::NoSigning, + vec![], + vec![], + main_branch_commit_body, + &self, + )?; + let branch_read_cap = main_branch_commit.reference().unwrap(); + let branch_read_cap_id = branch_read_cap.id; + + log_debug!("MAIN BRANCH COMMIT {}", main_branch_commit); + + // adding the event for the Branch commit + + // peer_last_seq_num += 1; + events.push((main_branch_commit, vec![])); + // events.push(Event::new( + // publisher_peer, + // peer_last_seq_num, + // &main_branch_commit, + // &vec![], + // main_branch_topic_pub_key, + // main_branch_commit.key().unwrap(), + // &main_branch_topic_priv_key, + // storage, + // )?); + + // creating the AddBranch commit (on root_branch), deps to the RootBranch commit + // author is the owner + + let add_branch_commit_body = + CommitBody::V0(CommitBodyV0::AddBranch(AddBranch::V0(AddBranchV0 { + branch_type: BranchType::Main, + topic_id: main_branch_topic_pub_key, + branch_read_cap: branch_read_cap.clone(), + }))); + + let add_branch_commit = Commit::new_with_body_acks_deps_and_save( + creator_priv_key, + creator, + repo_pub_key, + QuorumType::Owners, + vec![root_branch_readcap.clone()], + vec![], + add_branch_commit_body, + &self, + )?; + + log_debug!("ADD_BRANCH COMMIT {}", add_branch_commit); + + // TODO: optional AddMember and AddPermission, that should be added as deps to the SynSignature below (and to the commits of the SignatureContent) + // using the creator as author (and incrementing their peer's seq_num) + + // preparing the threshold keys for the unique owner + let mut rng = rand::thread_rng(); + let sk_set = SecretKeySet::random(0, &mut rng); + let pk_set = sk_set.public_keys(); + + let sk_share = sk_set.secret_key_share(0); + + // creating signature for RootBranch, AddBranch and Branch commits + // signed with owner threshold signature (threshold = 0) + + let signature_content = SignatureContent::V0(SignatureContentV0 { + commits: vec![ + root_branch_readcap_id, + add_branch_commit.id().unwrap(), + branch_read_cap_id, + ], + }); + + let signature_content_ser = serde_bare::to_vec(&signature_content).unwrap(); + let sig_share = sk_share.sign(signature_content_ser); + let sig = pk_set + .combine_signatures([(0, &sig_share)]) + .map_err(|_| NgError::IncompleteSignature)?; + + let threshold_sig = ThresholdSignatureV0::Owners((sig)); + + // creating root certificate of the repo + + let cert_content = CertificateContentV0 { + previous: repository_commit_ref, + readcap_id: root_branch_readcap_id, + owners_pk_set: pk_set.public_key(), + orders_pk_sets: OrdersPublicKeySetsV0::None, + }; + + // signing the root certificate + let cert_content_ser = serde_bare::to_vec(&cert_content).unwrap(); + let sig = sign(&repo_priv_key, &repo_pub_key, &cert_content_ser)?; + let cert_sig = CertificateSignatureV0::Repo(sig); + + let cert = Certificate::V0(CertificateV0 { + content: cert_content, + sig: cert_sig, + }); + // saving the certificate + let cert_object = Object::new( + ObjectContent::V0(ObjectContentV0::Certificate(cert)), + None, + 0, + &self, + ); + let mut cert_obj_blocks = cert_object.save(&self)?; + + // finally getting the signature: + + let signature = Signature::V0(SignatureV0 { + content: signature_content, + threshold_sig, + certificate_ref: cert_object.reference().unwrap(), + }); + + // saving the signature + let sig_object = Object::new( + ObjectContent::V0(ObjectContentV0::Signature(signature)), + None, + 0, + &self, + ); + let mut sig_obj_blocks = sig_object.save(&self)?; + + // keeping the Secret Key Share of the owner + 
let signer_cap = SignerCap { + repo: repo_pub_key, + epoch: root_branch_readcap_id, + owner: Some(threshold_crypto::serde_impl::SerdeSecret(sk_share)), + total_order: None, + partial_order: None, + }; + + let sync_signature = SyncSignature::V0(sig_object.reference().unwrap()); + + // creating the SyncSignature for the root_branch with deps to the AddBranch and acks to the RootBranch commit as it is its direct causal future. + let sync_sig_commit_body = CommitBody::V0(CommitBodyV0::SyncSignature(sync_signature)); + + let sync_sig_on_root_branch_commit = Commit::new_with_body_acks_deps_and_save( + creator_priv_key, + creator, + repo_pub_key, + QuorumType::IamTheSignature, + vec![add_branch_commit.reference().unwrap()], + vec![root_branch_readcap], + sync_sig_commit_body.clone(), + &self, + )?; + + // adding the event for the sync_sig_on_root_branch_commit + + let mut additional_blocks = Vec::with_capacity( + cert_obj_blocks.len() + sig_obj_blocks.len() + add_branch_commit.blocks().len(), + ); + additional_blocks.extend(cert_obj_blocks.iter()); + additional_blocks.extend(sig_obj_blocks.iter()); + additional_blocks.extend(add_branch_commit.blocks().iter()); + + //peer_last_seq_num += 1; + events.push((sync_sig_on_root_branch_commit, additional_blocks)); + // events.push(Event::new( + // publisher_peer, + // peer_last_seq_num, + // &sync_sig_on_root_branch_commit, + // &additional_blocks, + // topic_pub_key, + // root_branch_commit.key().unwrap(), + // &topic_priv_key, + // storage, + // )?); + + // creating the SyncSignature for the main branch with deps to the Branch commit and acks also to this commit as it is its direct causal future. + + let sync_sig_on_main_branch_commit = Commit::new_with_body_acks_deps_and_save( + creator_priv_key, + creator, + main_branch_pub_key, + QuorumType::IamTheSignature, + vec![branch_read_cap.clone()], + vec![branch_read_cap], + sync_sig_commit_body, + &self, + )?; + + // adding the event for the sync_sig_on_main_branch_commit + + let mut additional_blocks = + Vec::with_capacity(cert_obj_blocks.len() + sig_obj_blocks.len()); + additional_blocks.append(&mut cert_obj_blocks); + additional_blocks.append(&mut sig_obj_blocks); + + // peer_last_seq_num += 1; + events.push((sync_sig_on_main_branch_commit, additional_blocks)); + // events.push(Event::new( + // publisher_peer, + // peer_last_seq_num, + // &sync_sig_on_main_branch_commit, + // &additional_blocks, + // main_branch_topic_pub_key, + // main_branch_commit.key().unwrap(), + // &main_branch_topic_priv_key, + // storage, + // )?); + + // TODO: add the CertificateRefresh event on main branch + + // += 1; + + // preparing the Repo + + let repo = Repo { + id: repo_pub_key, + repo_def: repository, + signer: Some(signer_cap), + members: HashMap::new(), + store: self, + }; + + //let repo_ref = self.repos.entry(repo_pub_key).or_insert(repo); + Ok((repo, events)) + } + + pub fn new( + store_repo: StoreRepo, + store_readcap: ReadCap, + storage: Arc>, + ) -> Self { + Self { + store_repo, + store_readcap, + overlay_id: store_repo.overlay_id_for_storage_purpose(), + storage, + //repos: HashMap::new(), + } + } + + #[cfg(test)] + #[allow(deprecated)] + pub fn dummy_public_v0() -> Box { + use crate::block_storage::HashMapBlockStorage; + let store_repo = StoreRepo::dummy_public_v0(); + let store_readcap = ReadCap::dummy(); + //let storage = Box::new() as Box; + Box::new(Self::new( + store_repo, + store_readcap, + Arc::new(RwLock::new(HashMapBlockStorage::new())) + as Arc>, + )) + } + + #[cfg(test)] + pub fn 
dummy_with_key(repo_pubkey: PubKey) -> Box { + use crate::block_storage::HashMapBlockStorage; + let store_repo = StoreRepo::dummy_with_key(repo_pubkey); + let store_readcap = ReadCap::dummy(); + //let storage = Box::new() as Box; + Box::new(Self::new( + store_repo, + store_readcap, + Arc::new(RwLock::new(HashMapBlockStorage::new())) + as Arc>, + )) + } +} diff --git a/ng-repo/src/types.rs b/ng-repo/src/types.rs index bb719c9..3b325b3 100644 --- a/ng-repo/src/types.rs +++ b/ng-repo/src/types.rs @@ -669,18 +669,46 @@ impl StoreRepo { } #[cfg(test)] #[allow(deprecated)] - pub fn dummy_public_v0() -> (Self, SymKey) { - let readcap = SymKey::dummy(); + pub fn dummy_public_v0() -> Self { let store_pubkey = PubKey::nil(); - ( - StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey)), - readcap, - ) + StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey)) + } + #[cfg(test)] + pub fn dummy_with_key(repo_pubkey: PubKey) -> Self { + StoreRepo::V0(StoreRepoV0::PublicStore(repo_pubkey)) } pub fn overlay_id_for_read_purpose(&self) -> OverlayId { - let store_overlay: StoreOverlay = self.into(); - store_overlay.overlay_id_for_read_purpose() + //let store_overlay: StoreOverlay = self.into(); + //store_overlay.overlay_id_for_read_purpose() + OverlayId::outer(self.repo_id()) + } + + // pub fn overlay_id_for_storage_purpose( + // &self, + // store_overlay_branch_readcap_secret: Option, + // ) -> OverlayId { + // match self { + // Self::V0(StoreRepoV0::PublicStore(id)) + // | Self::V0(StoreRepoV0::ProtectedStore(id)) + // | Self::V0(StoreRepoV0::Group(id)) + // | Self::V0(StoreRepoV0::PrivateStore(id)) => self.overlay_id_for_read_purpose(), + // Self::V0(StoreRepoV0::Dialog(d)) => OverlayId::inner( + // &d.0, + // store_overlay_branch_readcap_secret + // .expect("Dialog needs store_overlay_branch_readcap_secret"), + // ), + // } + // } + + pub fn overlay_id_for_storage_purpose(&self) -> OverlayId { + match self { + Self::V0(StoreRepoV0::PublicStore(id)) + | Self::V0(StoreRepoV0::ProtectedStore(id)) + | Self::V0(StoreRepoV0::Group(id)) + | Self::V0(StoreRepoV0::PrivateStore(id)) => self.overlay_id_for_read_purpose(), + Self::V0(StoreRepoV0::Dialog(d)) => OverlayId::Inner(d.1.clone()), + } } } @@ -1628,7 +1656,7 @@ pub enum WalletUpdate { V0(WalletUpdateV0), } -/// Updates the ReadCap of the public and protected sites (and potentially also Group stores) +/// Updates the ReadCap of the public, protected sites, Group and Dialog stores of the User /// /// DEPS to the previous ones. /// this is used to speedup joining the overlay of such stores, for new devices on new brokers @@ -1636,7 +1664,7 @@ pub enum WalletUpdate { #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)] pub struct StoreUpdateV0 { // id of the store. - pub id: PubKey, + pub store: StoreRepo, pub store_read_cap: ReadCap, diff --git a/ng-storage-rocksdb/src/block_storage.rs b/ng-storage-rocksdb/src/block_storage.rs index 4a1599b..d146ff2 100644 --- a/ng-storage-rocksdb/src/block_storage.rs +++ b/ng-storage-rocksdb/src/block_storage.rs @@ -7,6 +7,7 @@ // notice may not be copied, modified, or distributed except // according to those terms. 
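// Consumers now hold an ng_repo::store::Store (added above); the concrete BlockStorage
// backend (RocksDbBlockStorage below on native targets, HashMapBlockStorage in memory
// and in tests) sits behind an Arc<RwLock<..>>, and every get/put/del is scoped to the
// store's overlay id. A minimal wiring sketch, assuming the in-memory test backend and
// a Send + Sync trait object:
use ng_repo::block_storage::{BlockStorage, HashMapBlockStorage};
use ng_repo::store::Store;
use ng_repo::types::{ReadCap, StoreRepo};
use std::sync::{Arc, RwLock};

fn sketch_store_wiring(store_repo: StoreRepo, store_readcap: ReadCap) -> Store {
    // Store::new computes the overlay id via overlay_id_for_storage_purpose();
    // store.put(&block) / store.get(&id) then go through this backend under that overlay.
    Store::new(
        store_repo,
        store_readcap,
        Arc::new(RwLock::new(HashMapBlockStorage::new()))
            as Arc<RwLock<dyn BlockStorage + Send + Sync>>,
    )
}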
+use ng_repo::block_storage::BlockStorage; use ng_repo::errors::StorageError; use ng_repo::types::*; use ng_repo::utils::*; @@ -17,976 +18,91 @@ use std::sync::{Arc, RwLock}; use serde::{Deserialize, Serialize}; use serde_bare::error::Error; -/* -#[derive(Debug)] -pub struct LmdbBlockStorage { - /// the main store where all the repo blocks are stored - main_store: SingleStore, - /// store for the pin boolean, recently_used timestamp, and synced boolean - meta_store: SingleStore, - /// store for the expiry timestamp - expiry_store: MultiIntegerStore, - /// store for the LRU list - recently_used_store: MultiIntegerStore, - /// the opened environment so we can create new transactions - environment: Arc>>, -} - -// TODO: versioning V0 -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -struct BlockMeta { - pub pin: bool, - pub last_used: Timestamp, - pub synced: bool, -} - -impl BlockStorage for LmdbBlockStorage { - /// Retrieves a block from the storage backend. - fn get(&self, block_id: &BlockId) -> Result { - let lock = self.environment.read().unwrap(); - let reader = lock.read().unwrap(); - let block_id_ser = serde_bare::to_vec(&block_id).unwrap(); - let block_ser_res = self.main_store.get(&reader, block_id_ser.clone()); - match block_ser_res { - Err(e) => Err(StorageError::BackendError), - Ok(None) => Err(StorageError::NotFound), - Ok(Some(block_ser)) => { - // updating recently_used - // first getting the meta for this BlockId - let meta_ser = self.meta_store.get(&reader, block_id_ser.clone()).unwrap(); - match meta_ser { - Some(meta_value) => { - let mut meta = - serde_bare::from_slice::(&meta_value.to_bytes().unwrap()) - .unwrap(); - if meta.synced { - let mut writer = lock.write().unwrap(); - let now = now_timestamp(); - if !meta.pin { - // we remove the previous timestamp (last_used) from recently_used_store - self.remove_from_lru(&mut writer, &block_id_ser, &meta.last_used) - .unwrap(); - // we add an entry to recently_used_store with now - self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap(); - } - // we save the new meta (with last_used:now) - meta.last_used = now; - let new_meta_ser = serde_bare::to_vec(&meta).unwrap(); - self.meta_store - .put( - &mut writer, - block_id_ser, - &Value::Blob(new_meta_ser.as_slice()), - ) - .unwrap(); - // commit - writer.commit().unwrap(); - } - } - _ => {} // there is no meta. we do nothing since we start to record LRU only once synced == true. - } - - match serde_bare::from_slice::(&block_ser.to_bytes().unwrap()) { - Err(_e) => Err(StorageError::InvalidValue), - Ok(mut o) => { - if o.get_and_save_id() != *block_id { - log_debug!( - "Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}", - block_id, - o.id(), - o - ); - panic!("CORRUPTION OF DATA !"); - } - Ok(o) - } - } - } - } - } - - /// Adds a block in the storage backend. - /// The block is persisted to disk. - /// Returns the BlockId of the Block. - fn put(&self, block: &Block) -> Result { - let block_ser = serde_bare::to_vec(&block).unwrap(); - - let block_id = block.id(); - let block_id_ser = serde_bare::to_vec(&block_id).unwrap(); - - let lock = self.environment.read().unwrap(); - let mut writer = lock.write().unwrap(); - - // TODO: check if the block is already in store? if yes, don't put it again. - // I didnt do it yet because it is extra cost. surely a get on the store is lighter than a put - // but doing a get in additing to a put for every call, is probably even costlier. 
better to deal with that at the higher level - - self.main_store - .put( - &mut writer, - &block_id_ser, - &Value::Blob(block_ser.as_slice()), - ) - .unwrap(); - - // if it has an expiry, adding the BlockId to the expiry_store - match block.expiry() { - Some(expiry) => { - self.expiry_store - .put(&mut writer, expiry, &Value::Blob(block_id_ser.as_slice())) - .unwrap(); - } - _ => {} - } - writer.commit().unwrap(); - Ok(block_id) - } - - /// Removes the block from the storage backend. - /// The removed block is returned, so it can be inspected. - /// Also returned is the approximate size of of free space that was reclaimed. - fn del(&self, block_id: &BlockId) -> Result<(Block, usize), StorageError> { - let lock = self.environment.read().unwrap(); - let mut writer = lock.write().unwrap(); - let block_id_ser = serde_bare::to_vec(&block_id).unwrap(); - // retrieving the block itself (we need the expiry) - let block_ser = self - .main_store - .get(&writer, block_id_ser.clone()) - .unwrap() - .ok_or(StorageError::NotFound)?; - let slice = block_ser.to_bytes().unwrap(); - let block = serde_bare::from_slice::(&slice).unwrap(); //FIXME propagate error? - let meta_res = self.meta_store.get(&writer, block_id_ser.clone()).unwrap(); - if meta_res.is_some() { - let meta = serde_bare::from_slice::(&meta_res.unwrap().to_bytes().unwrap()) - .unwrap(); - if meta.last_used != 0 { - self.remove_from_lru(&mut writer, &block_id_ser.clone(), &meta.last_used) - .unwrap(); - } - // removing the meta - self.meta_store - .delete(&mut writer, block_id_ser.clone()) - .unwrap(); - } - // delete block from main_store - self.main_store - .delete(&mut writer, block_id_ser.clone()) - .unwrap(); - // remove BlockId from expiry_store, if any expiry - match block.expiry() { - Some(expiry) => { - self.expiry_store - .delete( - &mut writer, - expiry, - &Value::Blob(block_id_ser.clone().as_slice()), - ) - .unwrap(); - } - _ => {} - } +use rocksdb::{ + ColumnFamily, ColumnFamilyDescriptor, Direction, Env, ErrorKind, IteratorMode, Options, + SingleThreaded, TransactionDB, TransactionDBOptions, DB, +}; - writer.commit().unwrap(); - Ok((block, slice.len())) - } +pub struct RocksDbBlockStorage { + /// the main store where all the properties of keys are stored + db: TransactionDB, + /// path for the storage backend data + path: String, } -impl LmdbBlockStorage { - /// Opens the store and returns a BlockStorage object that should be kept and used to call put/get/delete/pin +impl RocksDbBlockStorage { + /// Opens the store and returns a KCVStore object that should be kept and used to manipulate the properties /// The key is the encryption key for the data at rest. 
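    /// A minimal opening sketch (illustrative only; the path and the all-zero key below are
    /// placeholders, not values used anywhere in this patch):
    ///
    ///     let key: [u8; 32] = [0; 32];
    ///     let storage = RocksDbBlockStorage::open(Path::new("/tmp/ng-blocks"), key)?;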
- pub fn open<'a>(path: &Path, key: [u8; 32]) -> Result { - let mut manager = Manager::::singleton().write().unwrap(); - let shared_rkv = manager - .get_or_create(path, |path| { - //Rkv::new::(path) // use this instead to disable encryption - Rkv::with_encryption_key_and_mapsize::(path, key, 1 * 1024 * 1024 * 1024) - }) - .map_err(|e| { - log_debug!("open LMDB failed: {}", e); - StorageError::BackendError - })?; - let env = shared_rkv.read().unwrap(); - - log_debug!( - "created env with LMDB Version: {} key: {}", - env.version(), - hex::encode(&key) + pub fn open<'a>(path: &Path, key: [u8; 32]) -> Result { + let mut opts = Options::default(); + opts.set_use_fsync(true); + opts.create_if_missing(true); + opts.create_missing_column_families(true); + let env = Env::enc_env(key).unwrap(); + opts.set_env(&env); + let tx_options = TransactionDBOptions::new(); + let db: TransactionDB = + TransactionDB::open_cf(&opts, &tx_options, &path, vec!["cf0", "cf1"]).unwrap(); + + log_info!( + "created blockstorage with Rocksdb Version: {}", + Env::version() ); - let main_store = env.open_single("main", StoreOptions::create()).unwrap(); - let meta_store = env.open_single("meta", StoreOptions::create()).unwrap(); - let mut opts = StoreOptions::::create(); - opts.flags.set(DatabaseFlags::DUP_FIXED, true); - let expiry_store = env.open_multi_integer("expiry", opts).unwrap(); - let recently_used_store = env.open_multi_integer("recently_used", opts).unwrap(); - - Ok(LmdbBlockStorage { - environment: shared_rkv.clone(), - main_store, - meta_store, - expiry_store, - recently_used_store, + Ok(RocksDbBlockStorage { + db: db, + path: path.to_str().unwrap().to_string(), }) } - //FIXME: use BlockId, not ObjectId. this is a block level operation - /// Pins the object - pub fn pin(&self, object_id: &ObjectId) -> Result<(), StorageError> { - self.set_pin(object_id, true) - } - - //FIXME: use BlockId, not ObjectId. this is a block level operation - /// Unpins the object - pub fn unpin(&self, object_id: &ObjectId) -> Result<(), StorageError> { - self.set_pin(object_id, false) - } - - //FIXME: use BlockId, not ObjectId. this is a block level operation - /// Sets the pin for that Object. if add is true, will add the pin. if false, will remove the pin. - /// A pin on an object prevents it from being removed when the store is making some disk space by using the LRU. - /// A pin does not override the expiry. If expiry is set and is reached, the obejct will be deleted, no matter what. - pub fn set_pin(&self, object_id: &ObjectId, add: bool) -> Result<(), StorageError> { - let lock = self.environment.read().unwrap(); - let mut writer = lock.write().unwrap(); - let obj_id_ser = serde_bare::to_vec(&object_id).unwrap(); - let meta_ser = self.meta_store.get(&writer, &obj_id_ser).unwrap(); - let mut meta; - - // if adding a pin, if there is a meta (if already pinned, return) and is synced, remove the last_used timestamp from recently_used_store - // if no meta, create it with pin:true, synced: false - // if removing a pin (if pin already removed, return), if synced, add an entry to recently_used_store with the last_used timestamp (as found in meta, dont use now) - - match meta_ser { - Some(meta_value) => { - meta = - serde_bare::from_slice::(&meta_value.to_bytes().unwrap()).unwrap(); - - if add == meta.pin { - // pinning while already pinned, or unpinning while already unpinned. 
NOP - return Ok(()); - }; - - meta.pin = add; - - if meta.synced { - if add { - // we remove the previous timestamp (last_used) from recently_used_store - self.remove_from_lru(&mut writer, &obj_id_ser, &meta.last_used) - .unwrap(); - } else { - // we add an entry to recently_used_store with last_used - self.add_to_lru(&mut writer, &obj_id_ser, &meta.last_used) - .unwrap(); - } - } - } - None => { - if add { - meta = BlockMeta { - pin: true, - synced: false, - last_used: 0, - } - } else { - // there is no meta, and user wants to unpin, so let's leave everything as it is. - return Ok(()); - } - } - } - let new_meta_ser = serde_bare::to_vec(&meta).unwrap(); - self.meta_store - .put( - &mut writer, - obj_id_ser, - &Value::Blob(new_meta_ser.as_slice()), - ) - .unwrap(); - // commit - writer.commit().unwrap(); - - Ok(()) - } - - //FIXME: use BlockId, not ObjectId. this is a block level operation - /// the broker calls this method when the block has been retrieved/synced by enough peers and it - /// can now be included in the LRU for potential garbage collection. - /// If this method has not been called on a block, it will be kept in the store and will not enter LRU. - pub fn has_been_synced(&self, block_id: &BlockId, when: Option) -> Result<(), Error> { - let lock = self.environment.read().unwrap(); - let mut writer = lock.write().unwrap(); - let block_id_ser = serde_bare::to_vec(&block_id).unwrap(); - let meta_ser = self.meta_store.get(&writer, block_id_ser.clone()).unwrap(); - let mut meta; - let now = match when { - None => now_timestamp(), - Some(w) => w, - }; - // get the meta. if no meta, it is ok, we will create it after (with pin:false and synced:true) - // if already synced, return - // update the meta with last_used:now and synced:true - // if pinned, save and return - // otherwise add an entry to recently_used_store with now - - match meta_ser { - Some(meta_value) => { - meta = - serde_bare::from_slice::(&meta_value.to_bytes().unwrap()).unwrap(); - - if meta.synced { - // already synced. NOP - return Ok(()); - }; - - meta.synced = true; - meta.last_used = now; - - if !meta.pin { - // we add an entry to recently_used_store with now - log_debug!("adding to LRU"); - self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap(); - } - } - None => { - meta = BlockMeta { - pin: false, - synced: true, - last_used: now, - }; - log_debug!("adding to LRU also"); - self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap(); - } - } - let new_meta_ser = serde_bare::to_vec(&meta).unwrap(); - self.meta_store - .put( - &mut writer, - block_id_ser, - &Value::Blob(new_meta_ser.as_slice()), - ) - .unwrap(); - // commit - writer.commit().unwrap(); - - Ok(()) - } - - /// Removes all the blocks that have expired. - /// The broker should call this method periodically. - pub fn remove_expired(&self) -> Result<(), Error> { - let mut block_ids: Vec = vec![]; - - { - let lock = self.environment.read().unwrap(); - let reader = lock.read().unwrap(); - - let mut iter = self - .expiry_store - .iter_prev_dup_from(&reader, now_timestamp()) - .unwrap(); - - while let Some(Ok(mut sub_iter)) = iter.next() { - while let Some(Ok(k)) = sub_iter.next() { - //log_debug!("removing {:?} {:?}", k.0, k.1); - let block_id = serde_bare::from_slice::(k.1).unwrap(); - block_ids.push(block_id); - } - } - } - for block_id in block_ids { - self.del(&block_id).unwrap(); - } - Ok(()) - } - - /// Removes some blocks that haven't been used for a while, reclaiming some space on disk. 
- /// The oldest are removed first, until the total amount of data removed is at least equal to size, - /// or the LRU list became empty. The approximate size of the storage space that was reclaimed is returned. - pub fn remove_least_used(&self, size: usize) -> usize { - let mut block_ids: Vec = vec![]; - let mut total: usize = 0; - - { - let lock = self.environment.read().unwrap(); - let reader = lock.read().unwrap(); - - let mut iter = self.recently_used_store.iter_start(&reader).unwrap(); - - while let Some(Ok(entry)) = iter.next() { - let block_id = - serde_bare::from_slice::(entry.1.to_bytes().unwrap().as_slice()) - .unwrap(); - block_ids.push(block_id); - } - } - for block_id in block_ids { - let (block, block_size) = self.del(&block_id).unwrap(); - log_debug!("removed {:?}", block_id); - total += block_size; - if total >= size { - break; - } - } - total - } - - fn remove_from_lru( - &self, - writer: &mut Writer, - block_id_ser: &Vec, - time: &Timestamp, - ) -> Result<(), StoreError> { - self.recently_used_store - .delete(writer, *time, &Value::Blob(block_id_ser.as_slice())) - } - - fn add_to_lru( - &self, - writer: &mut Writer, - block_id_ser: &Vec, - time: &Timestamp, - ) -> Result<(), StoreError> { - let mut flag = LmdbWriteFlags::empty(); - flag.set(WriteFlags::APPEND_DUP, true); - self.recently_used_store.put_with_flags( - writer, - *time, - &Value::Blob(block_id_ser.as_slice()), - flag, - ) - } - - fn list_all(&self) { - let lock = self.environment.read().unwrap(); - let reader = lock.read().unwrap(); - log_debug!("MAIN"); - let mut iter = self.main_store.iter_start(&reader).unwrap(); - while let Some(Ok(entry)) = iter.next() { - log_debug!("{:?} {:?}", entry.0, entry.1) - } - log_debug!("META"); - let mut iter2 = self.meta_store.iter_start(&reader).unwrap(); - while let Some(Ok(entry)) = iter2.next() { - log_debug!("{:?} {:?}", entry.0, entry.1) - } - log_debug!("EXPIRY"); - let mut iter3 = self.expiry_store.iter_start(&reader).unwrap(); - while let Some(Ok(entry)) = iter3.next() { - log_debug!("{:?} {:?}", entry.0, entry.1) - } - log_debug!("LRU"); - let mut iter4 = self.recently_used_store.iter_start(&reader).unwrap(); - while let Some(Ok(entry)) = iter4.next() { - log_debug!("{:?} {:?}", entry.0, entry.1) - } + fn compute_key(overlay: &OverlayId, id: &BlockId) -> Vec { + let mut key: Vec = Vec::with_capacity(34 + 33); + key.append(&mut serde_bare::to_vec(overlay).unwrap()); + key.append(&mut serde_bare::to_vec(id).unwrap()); + key } } -*/ -#[cfg(test)] -mod test { - - use ng_repo::block_storage::*; - use ng_repo::log::*; - use ng_repo::types::*; - use ng_repo::utils::*; - #[allow(unused_imports)] - use std::time::Duration; - #[allow(unused_imports)] - use std::{fs, thread}; - use tempfile::Builder; - /* - #[test] - pub fn test_remove_least_used() { - let path_str = "test-env"; - let root = Builder::new().prefix(path_str).tempdir().unwrap(); - let key: [u8; 32] = [0; 32]; - fs::create_dir_all(root.path()).unwrap(); - log_debug!("{}", root.path().to_str().unwrap()); - let mut store = LmdbBlockStorage::open(root.path(), key).unwrap(); - let mut now = now_timestamp(); - now -= 200; - // TODO: fix the LMDB bug that is triggered with x max set to 86 !!! 
- for x in 1..85 { - let block = Block::new( - Vec::new(), - ObjectDeps::ObjectIdList(Vec::new()), - None, - vec![x; 10], - None, - ); - let block_id = store.put(&block).unwrap(); - log_debug!("#{} -> objId {:?}", x, block_id); - store - .has_been_synced(&block_id, Some(now + x as u32)) - .unwrap(); - } - - let ret = store.remove_least_used(200); - log_debug!("removed {}", ret); - assert_eq!(ret, 208) - //store.list_all(); - } - - #[test] - pub fn test_set_pin() { - let path_str = "test-env"; - let root = Builder::new().prefix(path_str).tempdir().unwrap(); - let key: [u8; 32] = [0; 32]; - fs::create_dir_all(root.path()).unwrap(); - log_debug!("{}", root.path().to_str().unwrap()); - let mut store = LmdbBlockStorage::open(root.path(), key).unwrap(); - let mut now = now_timestamp(); - now -= 200; - // TODO: fix the LMDB bug that is triggered with x max set to 86 !!! - for x in 1..100 { - let block = Block::new( - Vec::new(), - ObjectDeps::ObjectIdList(Vec::new()), - None, - vec![x; 10], - None, - ); - let obj_id = store.put(&block).unwrap(); - log_debug!("#{} -> objId {:?}", x, obj_id); - store.set_pin(&obj_id, true).unwrap(); - store - .has_been_synced(&obj_id, Some(now + x as u32)) - .unwrap(); - } - - let ret = store.remove_least_used(200); - log_debug!("removed {}", ret); - assert_eq!(ret, 0); - - store.list_all(); - } - */ - #[test] - pub fn test_get_valid_value_size() { - assert_eq!(store_valid_value_size(0), 4096); - assert_eq!(store_valid_value_size(2), 4096); - assert_eq!(store_valid_value_size(4096 - 1), 4096); - assert_eq!(store_valid_value_size(4096), 4096); - assert_eq!(store_valid_value_size(4096 + 1), 4096 + 4096); - assert_eq!(store_valid_value_size(4096 + 4096), 4096 + 4096); - assert_eq!(store_valid_value_size(4096 + 4096 + 1), 4096 + 4096 + 4096); - assert_eq!( - store_valid_value_size(4096 + 4096 + 4096), - 4096 + 4096 + 4096 - ); - assert_eq!( - store_valid_value_size(4096 + 4096 + 4096 + 1), - 4096 + 4096 + 4096 + 4096 - ); - assert_eq!(store_valid_value_size(4096 + 4096 * 255), 4096 + 4096 * 255); - assert_eq!( - store_valid_value_size(4096 + 4096 * 255 + 1), - 4096 + 4096 * 255 - ); - } - /* - #[test] - pub fn test_remove_expired() { - let path_str = "test-env"; - let root = Builder::new().prefix(path_str).tempdir().unwrap(); - let key: [u8; 32] = [0; 32]; - fs::create_dir_all(root.path()).unwrap(); - log_debug!("{}", root.path().to_str().unwrap()); - let mut store = LmdbBlockStorage::open(root.path(), key).unwrap(); - - let now = now_timestamp(); - let list = [ - now - 10, - now - 6, - now - 6, - now - 3, - now - 2, - now - 1, //#5 should be removed, and above - now + 3, - now + 4, - now + 4, - now + 5, - now + 10, - ]; - let mut block_ids: Vec = Vec::with_capacity(11); - log_debug!("now {}", now); - - let mut i = 0u8; - for expiry in list { - //let i: u8 = (expiry + 10 - now).try_into().unwrap(); - let block = Block::new( - Vec::new(), - ObjectDeps::ObjectIdList(Vec::new()), - Some(expiry), - [i].to_vec(), - None, - ); - let block_id = store.put(&block).unwrap(); - log_debug!("#{} -> objId {:?}", i, block_id); - block_ids.push(block_id); - i += 1; - } - - store.remove_expired().unwrap(); - - assert!(store.get(block_ids.get(0).unwrap()).is_err()); - assert!(store.get(block_ids.get(1).unwrap()).is_err()); - assert!(store.get(block_ids.get(2).unwrap()).is_err()); - assert!(store.get(block_ids.get(5).unwrap()).is_err()); - assert!(store.get(block_ids.get(6).unwrap()).is_ok()); - assert!(store.get(block_ids.get(7).unwrap()).is_ok()); - - //store.list_all(); +impl 
BlockStorage for RocksDbBlockStorage { + /// Load a block from the storage. + fn get(&self, overlay: &OverlayId, id: &BlockId) -> Result { + let block_ser = self + .db + .get(Self::compute_key(overlay, id)) + .map_err(|_e| StorageError::BackendError)? + .ok_or(StorageError::NotFound)?; + let block: Block = serde_bare::from_slice(&block_ser)?; + Ok(block) } - #[test] - pub fn test_remove_all_expired() { - let path_str = "test-env"; - let root = Builder::new().prefix(path_str).tempdir().unwrap(); - let key: [u8; 32] = [0; 32]; - fs::create_dir_all(root.path()).unwrap(); - log_debug!("{}", root.path().to_str().unwrap()); - let mut store = LmdbBlockStorage::open(root.path(), key).unwrap(); - - let now = now_timestamp(); - let list = [ - now - 10, - now - 6, - now - 6, - now - 3, - now - 2, - now - 2, //#5 should be removed, and above - ]; - let mut block_ids: Vec = Vec::with_capacity(6); - log_debug!("now {}", now); - - let mut i = 0u8; - for expiry in list { - //let i: u8 = (expiry + 10 - now).try_into().unwrap(); - let block = Block::new( - Vec::new(), - ObjectDeps::ObjectIdList(Vec::new()), - Some(expiry), - [i].to_vec(), - None, - ); - let block_id = store.put(&block).unwrap(); - log_debug!("#{} -> objId {:?}", i, block_id); - block_ids.push(block_id); - i += 1; - } - - store.remove_expired().unwrap(); - - assert!(store.get(block_ids.get(0).unwrap()).is_err()); - assert!(store.get(block_ids.get(1).unwrap()).is_err()); - assert!(store.get(block_ids.get(2).unwrap()).is_err()); - assert!(store.get(block_ids.get(3).unwrap()).is_err()); - assert!(store.get(block_ids.get(4).unwrap()).is_err()); - assert!(store.get(block_ids.get(5).unwrap()).is_err()); + /// Save a block to the storage. + fn put(&self, overlay: &OverlayId, block: &Block) -> Result { + // TODO? return an error if already present in blockstorage? + let block_id = block.id(); + let block_id_ser = serde_bare::to_vec(&block_id).unwrap(); + let ser = serde_bare::to_vec(block)?; + let tx = self.db.transaction(); + tx.put(Self::compute_key(overlay, &block_id), &ser) + .map_err(|_e| StorageError::BackendError)?; + tx.commit(); + Ok(block_id) } - #[test] - pub fn test_remove_empty_expired() { - let path_str = "test-env"; - let root = Builder::new().prefix(path_str).tempdir().unwrap(); - let key: [u8; 32] = [0; 32]; - fs::create_dir_all(root.path()).unwrap(); - log_debug!("{}", root.path().to_str().unwrap()); - let store = LmdbBlockStorage::open(root.path(), key).unwrap(); - store.remove_expired().unwrap(); + /// Delete a block from the storage. 
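    /// Returns the approximate amount of storage space that was reclaimed. In the current
    /// implementation this is not yet computed, so callers always get Ok(0) back (see the
    /// TODO in the body below).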
+ fn del(&self, overlay: &OverlayId, id: &BlockId) -> Result { + let block_id_ser = serde_bare::to_vec(id).unwrap(); + let tx = self.db.transaction(); + tx.delete(Self::compute_key(overlay, id)) + .map_err(|_e| StorageError::BackendError)?; + tx.commit(); + // TODO, return real size + Ok(0) } - #[test] - pub fn test_store_block() { - let path_str = "test-env"; - let root = Builder::new().prefix(path_str).tempdir().unwrap(); - - let key: [u8; 32] = [0; 32]; - fs::create_dir_all(root.path()).unwrap(); - - log_debug!("{}", root.path().to_str().unwrap()); - - let mut store = LmdbBlockStorage::open(root.path(), key).unwrap(); - - let block = Block::new( - Vec::new(), - ObjectDeps::ObjectIdList(Vec::new()), - None, - b"abc".to_vec(), - None, - ); - - let block_id = store.put(&block).unwrap(); - assert_eq!(block_id, block.id()); - - log_debug!("ObjectId: {:?}", block_id); - assert_eq!( - block_id, - Digest::Blake3Digest32([ - 155, 83, 186, 17, 95, 10, 80, 31, 111, 24, 250, 64, 8, 145, 71, 193, 103, 246, 202, - 28, 202, 144, 63, 65, 85, 229, 136, 85, 202, 34, 13, 85 - ]) - ); - - let block_res = store.get(&block_id).unwrap(); - - log_debug!("Block: {:?}", block_res); - assert_eq!(block_res.id(), block.id()); - } - - #[test] - pub fn test_rocksdb() { - let path_str = "test-env"; - let root = Builder::new().prefix(path_str).tempdir().unwrap(); - - // we set an encryption key with all zeros... for test purpose only ;) - let key: [u8; 32] = [0; 32]; - { - fs::create_dir_all(root.path()).unwrap(); - - log_debug!("{}", root.path().to_str().unwrap()); - - let mut manager = Manager::::singleton().write().unwrap(); - let shared_rkv = manager - .get_or_create(root.path(), |path| { - // Rkv::new::(path) // use this instead to disable encryption - Rkv::with_encryption_key_and_mapsize::(path, key, 1 * 1024 * 1024 * 1024) - }) - .unwrap(); - let env = shared_rkv.read().unwrap(); - - log_debug!("LMDB Version: {}", env.version()); - - let store = env.open_single("testdb", StoreOptions::create()).unwrap(); - - { - // Use a write transaction to mutate the store via a `Writer`. There can be only - // one writer for a given environment, so opening a second one will block until - // the first completes. - let mut writer = env.write().unwrap(); - - // Keys are `AsRef<[u8]>`, while values are `Value` enum instances. Use the `Blob` - // variant to store arbitrary collections of bytes. Putting data returns a - // `Result<(), StoreError>`, where StoreError is an enum identifying the reason - // for a failure. 
- // store.put(&mut writer, "int", &Value::I64(1234)).unwrap(); - // store - // .put(&mut writer, "uint", &Value::U64(1234_u64)) - // .unwrap(); - // store - // .put(&mut writer, "float", &Value::F64(1234.0.into())) - // .unwrap(); - // store - // .put(&mut writer, "instant", &Value::Instant(1528318073700)) - // .unwrap(); - // store - // .put(&mut writer, "boolean", &Value::Bool(true)) - // .unwrap(); - // store - // .put(&mut writer, "string", &Value::Str("Héllo, wörld!")) - // .unwrap(); - // store - // .put( - // &mut writer, - // "json", - // &Value::Json(r#"{"foo":"bar", "number": 1}"#), - // ) - // .unwrap(); - const EXTRA: usize = 2095; // + 4096 * 524280 + 0; - let key: [u8; 33] = [0; 33]; - let key2: [u8; 33] = [2; 33]; - let key3: [u8; 33] = [3; 33]; - let key4: [u8; 33] = [4; 33]; - //let value: [u8; 1977 + EXTRA] = [1; 1977 + EXTRA]; - let value = vec![1; 1977 + EXTRA]; - let value2: [u8; 1977 + 1] = [1; 1977 + 1]; - let value4: [u8; 953 + 0] = [1; 953 + 0]; - store.put(&mut writer, key, &Value::Blob(&value2)).unwrap(); - store.put(&mut writer, key2, &Value::Blob(&value2)).unwrap(); - // store.put(&mut writer, key3, &Value::Blob(&value)).unwrap(); - // store.put(&mut writer, key4, &Value::Blob(&value4)).unwrap(); - - // You must commit a write transaction before the writer goes out of scope, or the - // transaction will abort and the data won't persist. - writer.commit().unwrap(); - let reader = env.read().expect("reader"); - let stat = store.stat(&reader).unwrap(); - - log_debug!("LMDB stat page_size : {}", stat.page_size()); - log_debug!("LMDB stat depth : {}", stat.depth()); - log_debug!("LMDB stat branch_pages : {}", stat.branch_pages()); - log_debug!("LMDB stat leaf_pages : {}", stat.leaf_pages()); - log_debug!("LMDB stat overflow_pages : {}", stat.overflow_pages()); - log_debug!("LMDB stat entries : {}", stat.entries()); - } - - // { - // // Use a read transaction to query the store via a `Reader`. There can be multiple - // // concurrent readers for a store, and readers never block on a writer nor other - // // readers. - // let reader = env.read().expect("reader"); - - // // Keys are `AsRef`, and the return value is `Result, StoreError>`. - // // log_debug!("Get int {:?}", store.get(&reader, "int").unwrap()); - // // log_debug!("Get uint {:?}", store.get(&reader, "uint").unwrap()); - // // log_debug!("Get float {:?}", store.get(&reader, "float").unwrap()); - // // log_debug!("Get instant {:?}", store.get(&reader, "instant").unwrap()); - // // log_debug!("Get boolean {:?}", store.get(&reader, "boolean").unwrap()); - // // log_debug!("Get string {:?}", store.get(&reader, "string").unwrap()); - // // log_debug!("Get json {:?}", store.get(&reader, "json").unwrap()); - // log_debug!("Get blob {:?}", store.get(&reader, "blob").unwrap()); - - // // Retrieving a non-existent value returns `Ok(None)`. - // log_debug!( - // "Get non-existent value {:?}", - // store.get(&reader, "non-existent").unwrap() - // ); - - // // A read transaction will automatically close once the reader goes out of scope, - // // so isn't necessary to close it explicitly, although you can do so by calling - // // `Reader.abort()`. - // } - - // { - // // Aborting a write transaction rolls back the change(s). - // let mut writer = env.write().unwrap(); - // store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap(); - // writer.abort(); - // let reader = env.read().expect("reader"); - // log_debug!( - // "It should be None! 
({:?})", - // store.get(&reader, "foo").unwrap() - // ); - // } - - // { - // // Explicitly aborting a transaction is not required unless an early abort is - // // desired, since both read and write transactions will implicitly be aborted once - // // they go out of scope. - // { - // let mut writer = env.write().unwrap(); - // store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap(); - // } - // let reader = env.read().expect("reader"); - // log_debug!( - // "It should be None! ({:?})", - // store.get(&reader, "foo").unwrap() - // ); - // } - - // { - // // Deleting a key/value pair also requires a write transaction. - // let mut writer = env.write().unwrap(); - // store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap(); - // store.put(&mut writer, "bar", &Value::Blob(b"baz")).unwrap(); - // store.delete(&mut writer, "foo").unwrap(); - - // // A write transaction also supports reading, and the version of the store that it - // // reads includes the changes it has made regardless of the commit state of that - // // transaction. - // // In the code above, "foo" and "bar" were put into the store, then "foo" was - // // deleted so only "bar" will return a result when the database is queried via the - // // writer. - // log_debug!( - // "It should be None! ({:?})", - // store.get(&writer, "foo").unwrap() - // ); - // log_debug!("Get bar ({:?})", store.get(&writer, "bar").unwrap()); - - // // But a reader won't see that change until the write transaction is committed. - // { - // let reader = env.read().expect("reader"); - // log_debug!("Get foo {:?}", store.get(&reader, "foo").unwrap()); - // log_debug!("Get bar {:?}", store.get(&reader, "bar").unwrap()); - // } - // writer.commit().unwrap(); - // { - // let reader = env.read().expect("reader"); - // log_debug!( - // "It should be None! ({:?})", - // store.get(&reader, "foo").unwrap() - // ); - // log_debug!("Get bar {:?}", store.get(&reader, "bar").unwrap()); - // } - - // // Committing a transaction consumes the writer, preventing you from reusing it by - // // failing at compile time with an error. This line would report "error[E0382]: - // // borrow of moved value: `writer`". - // // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap(); - // } - - // { - // // Clearing all the entries in the store with a write transaction. - // { - // let mut writer = env.write().unwrap(); - // store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap(); - // store.put(&mut writer, "bar", &Value::Blob(b"baz")).unwrap(); - // writer.commit().unwrap(); - // } - - // // { - // // let mut writer = env.write().unwrap(); - // // store.clear(&mut writer).unwrap(); - // // writer.commit().unwrap(); - // // } - - // // { - // // let reader = env.read().expect("reader"); - // // log_debug!( - // // "It should be None! ({:?})", - // // store.get(&reader, "foo").unwrap() - // // ); - // // log_debug!( - // // "It should be None! 
({:?})", - // // store.get(&reader, "bar").unwrap() - // // ); - // // } - // } - - let stat = env.stat().unwrap(); - let info = env.info().unwrap(); - log_debug!("LMDB info map_size : {}", info.map_size()); - log_debug!("LMDB info last_pgno : {}", info.last_pgno()); - log_debug!("LMDB info last_txnid : {}", info.last_txnid()); - log_debug!("LMDB info max_readers : {}", info.max_readers()); - log_debug!("LMDB info num_readers : {}", info.num_readers()); - log_debug!("LMDB stat page_size : {}", stat.page_size()); - log_debug!("LMDB stat depth : {}", stat.depth()); - log_debug!("LMDB stat branch_pages : {}", stat.branch_pages()); - log_debug!("LMDB stat leaf_pages : {}", stat.leaf_pages()); - log_debug!("LMDB stat overflow_pages : {}", stat.overflow_pages()); - log_debug!("LMDB stat entries : {}", stat.entries()); - } - // We reopen the env and data to see if it was well saved to disk. - { - let mut manager = Manager::::singleton().write().unwrap(); - let shared_rkv = manager - .get_or_create(root.path(), |path| { - //Rkv::new::(path) // use this instead to disable encryption - Rkv::with_encryption_key_and_mapsize::(path, key, 1 * 1024 * 1024 * 1024) - }) - .unwrap(); - let env = shared_rkv.read().unwrap(); - - log_debug!("LMDB Version: {}", env.version()); - - let mut store = env.open_single("testdb", StoreOptions::default()).unwrap(); //StoreOptions::create() - - { - let reader = env.read().expect("reader"); - log_debug!( - "It should be baz! ({:?})", - store.get(&reader, "bar").unwrap() - ); - } - } - // Here the database and environment is closed, but the files are still present in the temp directory. - // uncomment this if you need time to copy them somewhere for analysis, before the temp folder get destroyed - //thread::sleep(Duration::from_millis(20000)); + /// number of Blocks in the storage + fn len(&self) -> Result { + //TODO return number of blocks + Ok(0) } - */ } diff --git a/ng-storage-rocksdb/src/kcv_storage.rs b/ng-storage-rocksdb/src/kcv_storage.rs index f569f6f..48c39cc 100644 --- a/ng-storage-rocksdb/src/kcv_storage.rs +++ b/ng-storage-rocksdb/src/kcv_storage.rs @@ -11,13 +11,15 @@ use ng_repo::kcv_storage::*; use ng_repo::errors::*; use ng_repo::log::*; +use rocksdb::DBIteratorWithThreadMode; +use std::collections::HashMap; use std::path::Path; use std::path::PathBuf; use rocksdb::{ - ColumnFamilyDescriptor, Direction, Env, ErrorKind, IteratorMode, Options, SingleThreaded, - TransactionDB, TransactionDBOptions, DB, + ColumnFamily, ColumnFamilyDescriptor, Direction, Env, ErrorKind, IteratorMode, Options, + SingleThreaded, TransactionDB, TransactionDBOptions, DB, }; pub struct RocksdbTransaction<'a> { @@ -32,6 +34,24 @@ impl<'a> RocksdbTransaction<'a> { fn tx(&self) -> &rocksdb::Transaction<'a, TransactionDB> { self.tx.as_ref().unwrap() } + fn get_iterator( + &self, + property_start: &[u8], + family: &Option, + ) -> Result, StorageError> { + Ok(match family { + Some(cf) => self.tx().iterator_cf( + self.store + .db + .cf_handle(&cf) + .ok_or(StorageError::UnknownColumnFamily)?, + IteratorMode::From(property_start, Direction::Forward), + ), + None => self + .tx() + .iterator(IteratorMode::From(property_start, Direction::Forward)), + }) + } } impl<'a> ReadTransaction for RocksdbTransaction<'a> { @@ -41,18 +61,52 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> { key_size: usize, key_prefix: Vec, suffix: Option, + family: &Option, ) -> Result, Vec)>, StorageError> { + let property_start = + RocksdbKCVStore::calc_key_start(prefix, key_size, &key_prefix, &suffix); + let 
iter = self.get_iterator(&property_start, &family)?; self.store - .get_all_keys_and_values(prefix, key_size, key_prefix, suffix) + .get_all_keys_and_values_(prefix, key_size, key_prefix, suffix, iter) + } + + fn get_all_properties_of_key( + &self, + prefix: u8, + key: Vec, + properties: Vec, + family: &Option, + ) -> Result>, StorageError> { + let key_size = key.len(); + let prop_values = self.get_all_keys_and_values(prefix, key_size, key, None, family)?; + Ok(RocksdbKCVStore::get_all_properties_of_key( + prop_values, + key_size, + &properties, + )) } /// Load a single value property from the store. - fn get(&self, prefix: u8, key: &Vec, suffix: Option) -> Result, StorageError> { - let property = RocksdbKCVStore::compute_property(prefix, key, suffix); - let res = self - .tx() - .get_for_update(property, true) - .map_err(|_e| StorageError::BackendError)?; + fn get( + &self, + prefix: u8, + key: &Vec, + suffix: Option, + family: &Option, + ) -> Result, StorageError> { + let property = RocksdbKCVStore::compute_property(prefix, key, &suffix); + let res = match family { + Some(cf) => self.tx().get_for_update_cf( + self.store + .db + .cf_handle(&cf) + .ok_or(StorageError::UnknownColumnFamily)?, + property, + true, + ), + None => self.tx().get_for_update(property, true), + } + .map_err(|_e| StorageError::BackendError)?; match res { Some(val) => Ok(val), None => Err(StorageError::NotFound), @@ -65,6 +119,7 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> { prefix: u8, key: &Vec, suffix: Option, + family: &Option, ) -> Result>, StorageError> { unimplemented!(); } @@ -76,21 +131,13 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> { key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - let property = RocksdbKCVStore::compute_property(prefix, key, suffix); - let exists = self - .tx() - .get_for_update(property, true) - .map_err(|_e| StorageError::BackendError)?; - match exists { - Some(stored_value) => { - if stored_value.eq(value) { - Ok(()) - } else { - Err(StorageError::DifferentValue) - } - } - None => Err(StorageError::NotFound), + let exists = self.get(prefix, key, suffix, family)?; + if exists.eq(value) { + Ok(()) + } else { + Err(StorageError::DifferentValue) } } } @@ -98,41 +145,61 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> { impl<'a> WriteTransaction for RocksdbTransaction<'a> { /// Save a property value to the store. fn put( - &mut self, + &self, prefix: u8, key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - let property = RocksdbKCVStore::compute_property(prefix, key, suffix); - self.tx() - .put(property, value) - .map_err(|_e| StorageError::BackendError)?; + let property = RocksdbKCVStore::compute_property(prefix, key, &suffix); + match family { + Some(cf) => self.tx().put_cf( + self.store + .db + .cf_handle(&cf) + .ok_or(StorageError::UnknownColumnFamily)?, + property, + value, + ), + None => self.tx().put(property, value), + } + .map_err(|_e| StorageError::BackendError)?; Ok(()) } /// Replace the property of a key (single value) to the store. fn replace( - &mut self, + &self, prefix: u8, key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - let property = RocksdbKCVStore::compute_property(prefix, key, suffix); - - self.tx() - .put(property, value) - .map_err(|_e| StorageError::BackendError)?; - - Ok(()) + self.put(prefix, key, suffix, value, family) } /// Delete a property from the store. 
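    /// Deleting a property that is not present is not treated as an error: a NotFound coming
    /// back from the backend is swallowed and Ok(()) is returned (see the ErrorKind::NotFound
    /// check in the body below).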
- fn del(&mut self, prefix: u8, key: &Vec, suffix: Option) -> Result<(), StorageError> { - let property = RocksdbKCVStore::compute_property(prefix, key, suffix); - let res = self.tx().delete(property); + fn del( + &self, + prefix: u8, + key: &Vec, + suffix: Option, + family: &Option, + ) -> Result<(), StorageError> { + let property = RocksdbKCVStore::compute_property(prefix, key, &suffix); + let res = match family { + Some(cf) => self.tx().delete_cf( + self.store + .db + .cf_handle(&cf) + .ok_or(StorageError::UnknownColumnFamily)?, + property, + ), + None => self.tx().delete(property), + }; if res.is_err() { if let ErrorKind::NotFound = res.unwrap_err().kind() { return Ok(()); @@ -144,42 +211,35 @@ impl<'a> WriteTransaction for RocksdbTransaction<'a> { /// Delete a specific value for a property from the store. fn del_property_value( - &mut self, + &self, prefix: u8, key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - let property = RocksdbKCVStore::compute_property(prefix, key, suffix); - let exists = self - .tx() - .get_for_update(property.clone(), true) - .map_err(|_e| StorageError::BackendError)?; - match exists { - Some(val) => { - if val.eq(value) { - self.tx() - .delete(property) - .map_err(|_e| StorageError::BackendError)?; - } - } - None => return Err(StorageError::DifferentValue), + let exists = self.get(prefix, key, suffix, family)?; + if exists.eq(value) { + self.del(prefix, key, suffix, family) + } else { + Err(StorageError::DifferentValue) } - Ok(()) } /// Delete all properties of a key from the store. + // TODO: this could be optimized with an iterator fn del_all( - &mut self, + &self, prefix: u8, key: &Vec, all_suffixes: &[u8], + family: &Option, ) -> Result<(), StorageError> { for suffix in all_suffixes { - self.del(prefix, key, Some(*suffix))?; + self.del(prefix, key, Some(*suffix), family)?; } if all_suffixes.is_empty() { - self.del(prefix, key, None)?; + self.del(prefix, key, None, family)?; } Ok(()) } @@ -187,7 +247,7 @@ impl<'a> WriteTransaction for RocksdbTransaction<'a> { pub struct RocksdbKCVStore { /// the main store where all the properties of keys are stored - main_db: TransactionDB, + db: TransactionDB, /// path for the storage backend data path: String, } @@ -207,69 +267,57 @@ fn compare(a: &[T], b: &[T]) -> std::cmp::Ordering { } impl ReadTransaction for RocksdbKCVStore { + /// returns a list of (key,value) that are in the range specified in the request fn get_all_keys_and_values( &self, prefix: u8, key_size: usize, key_prefix: Vec, suffix: Option, + family: &Option, ) -> Result, Vec)>, StorageError> { - if key_prefix.len() > key_size { - return Err(StorageError::InvalidValue); - } - - let mut vec_key_start = key_prefix.clone(); - let mut trailing_zeros = vec![0u8; key_size - key_prefix.len()]; - vec_key_start.append(&mut trailing_zeros); - - let mut vec_key_end = key_prefix.clone(); - let mut trailing_max = vec![255u8; key_size - key_prefix.len()]; - vec_key_end.append(&mut trailing_max); - - let property_start = Self::compute_property(prefix, &vec_key_start, suffix); - let property_end = - Self::compute_property(prefix, &vec_key_end, Some(suffix.unwrap_or(255u8))); + let property_start = Self::calc_key_start(prefix, key_size, &key_prefix, &suffix); + let iter = self.get_iterator(&property_start, &family)?; + self.get_all_keys_and_values_(prefix, key_size, key_prefix, suffix, iter) + } - let mut iter = self - .main_db - .iterator(IteratorMode::From(&property_start, Direction::Forward)); - let mut vector: Vec<(Vec, 
Vec)> = vec![]; - loop { - let res = iter.next(); - match res { - Some(Ok(val)) => { - match compare(&val.0, property_end.as_slice()) { - std::cmp::Ordering::Less | std::cmp::Ordering::Equal => { - if suffix.is_some() { - if val.0.len() < (key_size + 2) - || val.0[1 + key_size] != suffix.unwrap() - { - continue; - } - // } else if val.0.len() > (key_size + 1) { - // continue; - } - vector.push((val.0.to_vec(), val.1.to_vec())); - } - _ => {} //, - } - } - Some(Err(_e)) => return Err(StorageError::BackendError), - None => { - break; - } - } - } - Ok(vector) + /// returns a map of found properties and their value. If `properties` is empty, then all the properties are returned. + /// Otherwise, only the properties in the list are returned (if found in backend storage) + fn get_all_properties_of_key( + &self, + prefix: u8, + key: Vec, + properties: Vec, + family: &Option, + ) -> Result>, StorageError> { + let key_size = key.len(); + let prop_values = self.get_all_keys_and_values(prefix, key_size, key, None, family)?; + Ok(Self::get_all_properties_of_key( + prop_values, + key_size, + &properties, + )) } /// Load a single value property from the store. - fn get(&self, prefix: u8, key: &Vec, suffix: Option) -> Result, StorageError> { - let property = Self::compute_property(prefix, key, suffix); - let res = self - .main_db - .get(property) - .map_err(|_e| StorageError::BackendError)?; + fn get( + &self, + prefix: u8, + key: &Vec, + suffix: Option, + family: &Option, + ) -> Result, StorageError> { + let property = Self::compute_property(prefix, key, &suffix); + let res = match family { + Some(cf) => self.db.get_cf( + self.db + .cf_handle(&cf) + .ok_or(StorageError::UnknownColumnFamily)?, + property, + ), + None => self.db.get(property), + } + .map_err(|_e| StorageError::BackendError)?; match res { Some(val) => Ok(val), None => Err(StorageError::NotFound), @@ -282,6 +330,7 @@ impl ReadTransaction for RocksdbKCVStore { prefix: u8, key: &Vec, suffix: Option, + family: &Option, ) -> Result>, StorageError> { unimplemented!(); } @@ -293,21 +342,13 @@ impl ReadTransaction for RocksdbKCVStore { key: &Vec, suffix: Option, value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - let property = Self::compute_property(prefix, key, suffix); - let exists = self - .main_db - .get(property) - .map_err(|_e| StorageError::BackendError)?; - match exists { - Some(stored_value) => { - if stored_value.eq(value) { - Ok(()) - } else { - Err(StorageError::DifferentValue) - } - } - None => Err(StorageError::NotFound), + let exists = self.get(prefix, key, suffix, family)?; + if exists.eq(value) { + Ok(()) + } else { + Err(StorageError::DifferentValue) } } } @@ -317,7 +358,7 @@ impl KCVStore for RocksdbKCVStore { &self, method: &mut dyn FnMut(&mut dyn WriteTransaction) -> Result<(), StorageError>, ) -> Result<(), StorageError> { - let tx = self.main_db.transaction(); + let tx = self.db.transaction(); let mut transaction = RocksdbTransaction { store: self, @@ -330,16 +371,19 @@ impl KCVStore for RocksdbKCVStore { } res } +} +impl WriteTransaction for RocksdbKCVStore { /// Save a property value to the store. fn put( &self, prefix: u8, key: &Vec, suffix: Option, - value: Vec, + value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - self.write_transaction(&mut |tx| tx.put(prefix, key, suffix, &value)) + self.write_transaction(&mut |tx| tx.put(prefix, key, suffix, value, family)) } /// Replace the property of a key (single value) to the store. 
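// Illustrative sketch, not part of the patch: every read/write operation of the KCV store now
// takes an additional `family: &Option<String>` argument naming a RocksDB column family, with
// `None` meaning the default family; the prefix/key/suffix layout of properties is unchanged.
// The prefix byte, suffix byte and family name below are placeholders:
//
//     let key: Vec<u8> = vec![0u8; 8];
//     let value: Vec<u8> = b"hello".to_vec();
//     store.write_transaction(&mut |tx| {
//         tx.put(1u8, &key, None, &value, &None)?;                         // default family
//         tx.put(1u8, &key, Some(b'v'), &value, &Some("cf1".to_string()))  // named family
//     })?;
//     let read_back = store.get(1u8, &key, None, &None)?;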
@@ -348,14 +392,21 @@ impl KCVStore for RocksdbKCVStore { prefix: u8, key: &Vec, suffix: Option, - value: Vec, + value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - self.write_transaction(&mut |tx| tx.replace(prefix, key, suffix, &value)) + self.write_transaction(&mut |tx| tx.replace(prefix, key, suffix, value, family)) } /// Delete a property from the store. - fn del(&self, prefix: u8, key: &Vec, suffix: Option) -> Result<(), StorageError> { - self.write_transaction(&mut |tx| tx.del(prefix, key, suffix)) + fn del( + &self, + prefix: u8, + key: &Vec, + suffix: Option, + family: &Option, + ) -> Result<(), StorageError> { + self.write_transaction(&mut |tx| tx.del(prefix, key, suffix, family)) } /// Delete a specific value for a property from the store. @@ -364,20 +415,29 @@ impl KCVStore for RocksdbKCVStore { prefix: u8, key: &Vec, suffix: Option, - value: Vec, + value: &Vec, + family: &Option, ) -> Result<(), StorageError> { - self.write_transaction(&mut |tx| tx.del_property_value(prefix, key, suffix, &value)) + self.write_transaction(&mut |tx| tx.del_property_value(prefix, key, suffix, value, family)) } /// Delete all properties of a key from the store. - fn del_all(&self, prefix: u8, key: &Vec, all_suffixes: &[u8]) -> Result<(), StorageError> { - for suffix in all_suffixes { - self.del(prefix, key, Some(*suffix))?; - } - if all_suffixes.is_empty() { - self.del(prefix, key, None)?; - } - Ok(()) + fn del_all( + &self, + prefix: u8, + key: &Vec, + all_suffixes: &[u8], + family: &Option, + ) -> Result<(), StorageError> { + self.write_transaction(&mut |tx| { + for suffix in all_suffixes { + tx.del(prefix, key, Some(*suffix), family)?; + } + if all_suffixes.is_empty() { + tx.del(prefix, key, None, family)?; + } + Ok(()) + }) } } @@ -386,7 +446,123 @@ impl RocksdbKCVStore { PathBuf::from(&self.path) } - fn compute_property(prefix: u8, key: &Vec, suffix: Option) -> Vec { + fn get_all_properties_of_key( + prop_values: Vec<(Vec, Vec)>, + key_size: usize, + properties: &Vec, + ) -> HashMap> { + let mut res = HashMap::new(); + for prop_val in prop_values { + let prop = prop_val.0[1 + key_size]; + if properties.len() > 0 && !properties.contains(&prop) { + continue; + } + res.insert(prop, prop_val.1); + } + res + } + + fn get_all_keys_and_values_( + &self, + prefix: u8, + key_size: usize, + key_prefix: Vec, + suffix: Option, + mut iter: DBIteratorWithThreadMode<'_, impl rocksdb::DBAccess>, + ) -> Result, Vec)>, StorageError> { + if key_prefix.len() > key_size { + return Err(StorageError::InvalidValue); + } + + // let mut vec_key_start = key_prefix.clone(); + // let mut trailing_zeros = vec![0u8; key_size - key_prefix.len()]; + // vec_key_start.append(&mut trailing_zeros); + + let mut vec_key_end = key_prefix.clone(); + let mut trailing_max = vec![255u8; key_size - key_prefix.len()]; + vec_key_end.append(&mut trailing_max); + + // let property_start = Self::compute_property(prefix, &vec_key_start, suffix); + let property_end = + Self::compute_property(prefix, &vec_key_end, &Some(suffix.unwrap_or(255u8))); + + // let mut iter = match family { + // Some(cf) => self.db.iterator_cf( + // self.db + // .cf_handle(&cf) + // .ok_or(StorageError::UnknownColumnFamily)?, + // IteratorMode::From(&property_start, Direction::Forward), + // ), + // None => self + // .db + // .iterator(IteratorMode::From(&property_start, Direction::Forward)), + // }; + let mut vector: Vec<(Vec, Vec)> = vec![]; + loop { + let res = iter.next(); + match res { + Some(Ok(val)) => { + match compare(&val.0, 
property_end.as_slice()) { + std::cmp::Ordering::Less | std::cmp::Ordering::Equal => { + if suffix.is_some() { + if val.0.len() < (key_size + 2) + || val.0[1 + key_size] != suffix.unwrap() + { + continue; + } + // } else if val.0.len() > (key_size + 1) { + // continue; + } + vector.push((val.0.to_vec(), val.1.to_vec())); + } + _ => {} //, + } + } + Some(Err(_e)) => return Err(StorageError::BackendError), + None => { + break; + } + } + } + Ok(vector) + } + + fn calc_key_start( + prefix: u8, + key_size: usize, + key_prefix: &Vec, + suffix: &Option, + ) -> Vec { + let mut vec_key_start = key_prefix.clone(); + let mut trailing_zeros = vec![0u8; key_size - key_prefix.len()]; + vec_key_start.append(&mut trailing_zeros); + + let mut vec_key_end = key_prefix.clone(); + let mut trailing_max = vec![255u8; key_size - key_prefix.len()]; + vec_key_end.append(&mut trailing_max); + + Self::compute_property(prefix, &vec_key_start, suffix) + } + + fn get_iterator( + &self, + property_start: &[u8], + family: &Option, + ) -> Result, StorageError> { + Ok(match family { + Some(cf) => self.db.iterator_cf( + self.db + .cf_handle(&cf) + .ok_or(StorageError::UnknownColumnFamily)?, + IteratorMode::From(property_start, Direction::Forward), + ), + None => self + .db + .iterator(IteratorMode::From(property_start, Direction::Forward)), + }) + } + + fn compute_property(prefix: u8, key: &Vec, suffix: &Option) -> Vec { let mut new: Vec = Vec::with_capacity(key.len() + 2); new.push(prefix); new.extend(key); @@ -412,7 +588,7 @@ impl RocksdbKCVStore { log_info!("created db with Rocksdb Version: {}", Env::version()); Ok(RocksdbKCVStore { - main_db: db, + db: db, path: path.to_str().unwrap().to_string(), }) } diff --git a/ng-verifier/Cargo.toml b/ng-verifier/Cargo.toml index 8421b83..41a0dc0 100644 --- a/ng-verifier/Cargo.toml +++ b/ng-verifier/Cargo.toml @@ -26,6 +26,11 @@ serde_bytes = "0.11.7" oxigraph = { git = "https://git.nextgraph.org/NextGraph/oxigraph.git", branch="main" } automerge = "0.5.9" yrs = "0.18.2" +async-std = { version = "1.12.0", features = [ "attributes", "unstable" ] } +threshold_crypto = "0.4.0" +rand = { version = "0.7", features = ["getrandom"] } +web-time = "0.2.0" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] -ng-storage-rocksdb = { path = "../ng-storage-rocksdb", version = "0.1.0" } \ No newline at end of file +ng-storage-rocksdb = { path = "../ng-storage-rocksdb", version = "0.1.0" } +getrandom = "0.2.7" diff --git a/ng-verifier/src/lib.rs b/ng-verifier/src/lib.rs index f8ff6a9..5f13f5b 100644 --- a/ng-verifier/src/lib.rs +++ b/ng-verifier/src/lib.rs @@ -2,5 +2,7 @@ pub mod types; pub mod user_storage; +pub mod verifier; + #[cfg(not(target_family = "wasm"))] pub mod rocksdb_user_storage; diff --git a/ng-verifier/src/types.rs b/ng-verifier/src/types.rs index 43dadb4..cf1d7c7 100644 --- a/ng-verifier/src/types.rs +++ b/ng-verifier/src/types.rs @@ -11,22 +11,29 @@ use core::fmt; //use oxigraph::io::{RdfFormat, RdfParser, RdfSerializer}; -use oxigraph::store::Store; +//use oxigraph::store::Store; //use oxigraph::model::GroundQuad; #[cfg(not(target_family = "wasm"))] use crate::rocksdb_user_storage::RocksDbUserStorage; use crate::user_storage::{InMemoryUserStorage, UserStorage}; -use std::path::PathBuf; +use async_std::sync::Mutex; +use std::{collections::HashMap, path::PathBuf, sync::Arc}; use ng_net::{ + connection::NoiseFSM, + errors::ProtocolError, types::*, utils::{Receiver, Sender}, }; use ng_repo::{ + block_storage::BlockStorage, errors::{NgError, StorageError}, + 
file::RandomAccessFile, + store::Store, types::*, }; use serde::{Deserialize, Serialize}; +use web_time::SystemTime; //use yrs::{StateVector, Update}; #[derive(Debug, Clone)] @@ -40,6 +47,7 @@ pub enum VerifierType { Remote(Option), /// IndexedDb based rocksdb compiled to WASM... not ready yet. obviously. only works in the browser WebRocksDb, + // Server, this type is for Server Broker that act as verifier. They answer to VerifierType::Remote types of verifier. } impl VerifierType { @@ -101,70 +109,59 @@ pub struct VerifierConfig { pub type CancelFn = Box; -pub struct Verifier { - pub config: VerifierConfig, - pub connected_server_id: Option, - graph_dataset: Option, - user_storage: Option>, -} +// +// APP PROTOCOL (between APP and VERIFIER) +// -impl fmt::Debug for Verifier { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - writeln!(f, "Verifier\nconfig: {:?}", self.config)?; - writeln!(f, "connected_server_id: {:?}", self.connected_server_id) - } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum AppFetchContentV0 { + Get, // more to be detailed + ReadQuery, // more to be detailed + WriteQuery, // more to be detailed } -impl Verifier { - pub fn new(config: VerifierConfig) -> Result { - let (graph, user) = match &config.config_type { - VerifierConfigType::Memory | VerifierConfigType::JsSaveSession(_) => ( - Some(Store::new().unwrap()), - Some(Box::new(InMemoryUserStorage::new()) as Box), - ), - #[cfg(not(target_family = "wasm"))] - VerifierConfigType::RocksDb(path) => ( - // FIXME BIG TIME: we are reusing the same encryption key here. - // this is very temporary, until we remove the code in oxi_rocksdb of oxigraph, - // and have oxigraph use directly the UserStorage - Some(Store::open_with_key(path, config.user_master_key).unwrap()), - Some( - Box::new(RocksDbUserStorage::open(path, config.user_master_key)?) 
- as Box, - ), - ), - VerifierConfigType::Remote(_) => (None, None), - _ => unimplemented!(), // can be WebRocksDb or RocksDb on wasm platforms - }; - Ok(Verifier { - config, - connected_server_id: None, - graph_dataset: graph, - user_storage: user, - }) - } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct AppFetchV0 { + pub doc_id: RepoId, - pub fn doc_fetch( - &self, - nuri: String, - payload: Option, - ) -> Result<(Receiver, CancelFn), NgError> { - unimplemented!(); - } + pub branch_id: Option, + + pub store: StoreRepo, + + pub content: AppFetchContentV0, } -// -// APP PROTOCOL (between APP and VERIFIER) -// +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum AppRequestContentV0 { + FetchNuri, + Fetch(AppFetchV0), + Pin, + UnPin, + Delete, + Create, + FileGet, // needs the Nuri of branch/doc/store AND ObjectId + FilePut, // needs the Nuri of branch/doc/store +} #[derive(Clone, Debug, Serialize, Deserialize)] -pub struct AppRequestV0 {} +pub struct AppRequestV0 { + pub nuri: Option, + + pub content: AppRequestContentV0, + + pub payload: Option, +} #[derive(Clone, Debug, Serialize, Deserialize)] pub enum AppRequest { V0(AppRequestV0), } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub enum AppQuery { + V0(String), // Sparql +} + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct GraphUpdate { sparql_update: String, @@ -202,8 +199,12 @@ pub struct AppDelete { #[derive(Clone, Debug, Serialize, Deserialize)] pub enum AppRequestPayloadV0 { Create(AppCreate), + Query(AppQuery), Update(AppUpdate), Delete(AppDelete), + SmallFilePut(SmallFile), + RandomAccessFilePut(String), // content_type + RandomAccessFilePutChunk((ObjectId, Vec)), // end the upload with an empty vec } #[derive(Clone, Debug, Serialize, Deserialize)] @@ -256,10 +257,20 @@ pub struct AppPatch { discrete: Option, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct FileName { + name: Option, + reference: ObjectRef, +} + #[derive(Clone, Debug, Serialize, Deserialize)] pub enum AppResponseV0 { State(AppState), Patch(AppPatch), + Text(String), + File(FileName), + FileBinary(Vec), + QueryResult, // see sparesults } #[derive(Clone, Debug, Serialize, Deserialize)] diff --git a/ng-verifier/src/verifier.rs b/ng-verifier/src/verifier.rs new file mode 100644 index 0000000..ce15fcf --- /dev/null +++ b/ng-verifier/src/verifier.rs @@ -0,0 +1,309 @@ +// Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers +// All rights reserved. +// Licensed under the Apache License, Version 2.0 +// +// or the MIT license , +// at your option. All files in the project carrying such +// notice may not be copied, modified, or distributed except +// according to those terms. + +//! 
Repo object (on heap) to handle a Repository + +use crate::types::*; +use ng_repo::log::*; +use ng_repo::object::Object; +use ng_repo::{ + block_storage::BlockStorage, + errors::{NgError, StorageError}, + file::RandomAccessFile, + repo::Repo, + store::Store, + types::*, + utils::{generate_keypair, sign}, +}; + +use core::fmt; +//use oxigraph::io::{RdfFormat, RdfParser, RdfSerializer}; +//use oxigraph::store::Store; +//use oxigraph::model::GroundQuad; +#[cfg(not(target_family = "wasm"))] +use crate::rocksdb_user_storage::RocksDbUserStorage; +use crate::user_storage::{InMemoryUserStorage, UserStorage}; +use async_std::sync::Mutex; +use std::{collections::HashMap, path::PathBuf, sync::Arc}; + +use ng_net::{ + connection::NoiseFSM, + errors::ProtocolError, + types::*, + utils::{Receiver, Sender}, +}; + +use serde::{Deserialize, Serialize}; +use web_time::SystemTime; +//use yrs::{StateVector, Update}; + +pub struct Verifier { + pub config: VerifierConfig, + pub connected_server_id: Option, + graph_dataset: Option, + user_storage: Option>, + block_storage: Option>>, + last_seq_num: u64, + peer_id: PubKey, + max_reserved_seq_num: u64, + last_reservation: SystemTime, + stores: HashMap, + repos: HashMap, +} + +impl fmt::Debug for Verifier { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "Verifier\nconfig: {:?}", self.config)?; + writeln!(f, "connected_server_id: {:?}", self.connected_server_id) + } +} + +impl Verifier { + #[cfg(test)] + pub fn new_dummy() -> Self { + let (peer_priv_key, peer_id) = generate_keypair(); + let block_storage = Arc::new(RwLock::new(HashMapBlockStorage::new())) + as Arc>>; + Verifier { + config: VerifierConfig { + config_type: VerifierConfigType::Memory, + user_master_key: [0; 32], + peer_priv_key, + user_priv_key: PrivKey::random_ed(), + private_store_read_cap: ObjectRef::dummy(), + }, + connected_server_id: None, + graph_dataset: None, + user_storage: None, + block_storage: Some(block_storage), + last_seq_num: 0, + peer_id, + max_reserved_seq_num: 1, + last_reservation: SystemTime::now(), + stores: HashMap::new(), + repos: HashMap::new(), + } + } + + pub fn get_store(&mut self, store_repo: &StoreRepo) -> &mut Store { + let overlay_id = store_repo.overlay_id_for_storage_purpose(); + if self.stores.get(&overlay_id).is_none() { + // FIXME: get store_readcap from user storage + let store_readcap = ReadCap::nil(); + let store = Store::new( + *store_repo, + store_readcap, + Arc::clone( + &self + .block_storage + .as_ref() + .ok_or(core::fmt::Error) + .expect("get_store cannot be called on Remote Verifier"), + ), + ); + //self.stores.insert(overlay_id, store); + let store = self.stores.entry(overlay_id).or_insert(store); + store + } else { + self.stores.get_mut(&overlay_id).unwrap() + } + } + + pub(crate) fn new_event( + &mut self, + //publisher: &PrivKey, + //seq: &mut u64, + commit: &Commit, + additional_blocks: &Vec, + //topic_id: TopicId, + //topic_priv_key: &BranchWriteCapSecret, + store: &Store, // store could be omitted and a store repo ID would be given instead. 
+ ) -> Result { + let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch + let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId) + let seq = self.last_seq_number()?; + Event::new( + &self.config.peer_priv_key, + seq, + commit, + additional_blocks, + topic_id, + &topic_priv_key, + store, + ) + } + + pub(crate) fn last_seq_number(&mut self) -> Result { + if self.last_seq_num - 1 >= self.max_reserved_seq_num { + self.reserve_more(1)?; + } + self.last_seq_num += 1; + Ok(self.last_seq_num) + } + + pub(crate) fn new_events( + &mut self, + events: Vec<(Commit, Vec)>, + store: &Store, + ) -> Result, NgError> { + let missing_count = events.len() as i64 - self.available_seq_nums() as i64; + // this is reducing the capacity of reserver_seq_num by half (cast from u64 to i64) + // but we will never reach situation where so many seq_nums are reserved, neither such a big list of events to processs + if missing_count >= 0 { + self.reserve_more(missing_count as u64 + 1)?; + } + let mut res = vec![]; + for event in events { + let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch + let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId) + self.last_seq_num += 1; + let event = Event::new( + &self.config.peer_priv_key, + self.last_seq_num, + &event.0, + &event.1, + topic_id, + &topic_priv_key, + store, + )?; + res.push(event); + } + Ok(res) + } + + fn available_seq_nums(&self) -> u64 { + self.max_reserved_seq_num - self.last_seq_num + } + + fn reserve_more(&mut self, at_least: u64) -> Result<(), NgError> { + // the qty should be calculated based on the last_reservation. the closer to now, the higher the qty. + // below 1 sec, => 100 + // below 5 sec, => 10 + // below 10 sec => 1 + self.take_some_peer_last_seq_numbers(10) + } + + fn take_some_peer_last_seq_numbers(&mut self, qty: u16) -> Result<(), NgError> { + // TODO the magic + + Ok(()) + } + + pub fn new( + config: VerifierConfig, + block_storage: Arc>, + ) -> Result { + let (graph, user, block) = match &config.config_type { + VerifierConfigType::Memory | VerifierConfigType::JsSaveSession(_) => ( + Some(oxigraph::store::Store::new().unwrap()), + Some(Box::new(InMemoryUserStorage::new()) as Box), + Some(block_storage), + ), + #[cfg(not(target_family = "wasm"))] + VerifierConfigType::RocksDb(path) => ( + // FIXME BIG TIME: we are reusing the same encryption key here. + // this is very temporary, until we remove the code in oxi_rocksdb of oxigraph, + // and have oxigraph use directly the UserStorage + Some(oxigraph::store::Store::open_with_key(path, config.user_master_key).unwrap()), + Some( + Box::new(RocksDbUserStorage::open(path, config.user_master_key)?) 
+ as Box, + ), + Some(block_storage), + ), + VerifierConfigType::Remote(_) => (None, None, None), + _ => unimplemented!(), // can be WebRocksDb or RocksDb on wasm platforms + }; + let peer_id = config.peer_priv_key.to_pub(); + let mut verif = Verifier { + config, + connected_server_id: None, + graph_dataset: graph, + user_storage: user, + block_storage: block, + peer_id, + last_reservation: SystemTime::now(), + max_reserved_seq_num: 0, + last_seq_num: 0, + stores: HashMap::new(), + repos: HashMap::new(), + }; + verif.take_some_peer_last_seq_numbers(1)?; + Ok(verif) + } + + pub fn doc_fetch( + &mut self, + nuri: String, + payload: Option, + ) -> Result<(Receiver, CancelFn), NgError> { + unimplemented!(); + } + + pub async fn respond( + &mut self, + msg: ProtocolMessage, + fsm: Arc>, + ) -> Result<(), ProtocolError> { + unimplemented!(); + } + + /// returns the Repo and the last seq_num of the peer + pub fn new_repo_default<'a>( + &'a mut self, + creator: &UserId, + creator_priv_key: &PrivKey, + //store_repo: &StoreRepo, + store: Box, + ) -> Result<(&'a Repo, Vec), NgError> { + //let store = self.get_store(store_repo); + let (repo, proto_events) = store.create_repo_default(creator, creator_priv_key)?; + + //repo.store = Some(store); + let events = self.new_events(proto_events, &repo.store)?; + + let repo_ref = self.repos.entry(repo.id).or_insert(repo); + Ok((repo_ref, events)) + } +} +#[cfg(test)] +mod test { + + use crate::types::*; + use crate::verifier::*; + use ng_repo::log::*; + + #[test] + pub fn test_new_repo_default() { + let (creator_priv_key, creator_pub_key) = generate_keypair(); + + let (publisher_privkey, publisher_pubkey) = generate_keypair(); + let publisher_peer = PeerId::Forwarded(publisher_pubkey); + + let store = Store::dummy_public_v0(); + + let mut verifier = Verifier::new_dummy(); + //let store = verifier.get_store(store_repo); + + let (repo, events) = verifier + .new_repo_default(&creator_pub_key, &creator_priv_key, store) + .expect("new_default"); + + log_debug!("REPO OBJECT {}", repo); + + log_debug!("events: {}\n", events.len()); + let mut i = 0; + for e in events { + log_debug!("========== EVENT {:03}: {}", i, e); + i += 1; + } + + assert_eq!(verifier.last_seq_number(), 6); + } +}
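// Illustrative sketch, not part of the patch: wiring a persistent Verifier outside of tests,
// pairing VerifierConfigType::RocksDb with the RocksDbBlockStorage introduced earlier in this
// patch. The paths, the zeroed user_master_key and the exact wrapper type around the block
// storage are placeholders that mirror new_dummy() above, not values mandated by the patch:
//
//     let (peer_priv_key, _peer_id) = generate_keypair();
//     let block_storage = Arc::new(RwLock::new(RocksDbBlockStorage::open(
//         Path::new("/data/ng/blocks"),
//         [0; 32],
//     )?)) as Arc<RwLock<dyn BlockStorage + Send + Sync>>;
//     let verifier = Verifier::new(
//         VerifierConfig {
//             config_type: VerifierConfigType::RocksDb(PathBuf::from("/data/ng/user")),
//             user_master_key: [0; 32],
//             peer_priv_key,
//             user_priv_key: PrivKey::random_ed(),
//             private_store_read_cap: ObjectRef::dummy(),
//         },
//         block_storage,
//     )?;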