refactor UserStorage and BlockStorage

pull/19/head
Niko PLP 9 months ago
parent 9370a9216e
commit f349d4a748
  1. Cargo.lock: 7 changes
  2. nextgraph/Cargo.toml: 4 changes
  3. nextgraph/src/local_broker.rs: 165 changes
  4. ng-broker/src/broker_storage/account.rs: 36 changes
  5. ng-broker/src/broker_storage/config.rs: 6 changes
  6. ng-broker/src/broker_storage/invitation.rs: 26 changes
  7. ng-broker/src/broker_storage/overlay.rs: 35 changes
  8. ng-broker/src/broker_storage/peer.rs: 23 changes
  9. ng-broker/src/broker_storage/topic.rs: 24 changes
  10. ng-broker/src/broker_storage/wallet.rs: 6 changes
  11. ng-net/src/broker.rs: 56 changes
  12. ng-net/src/connection.rs: 9 changes
  13. ng-repo/src/block_storage.rs: 18 changes
  14. ng-repo/src/branch.rs: 128 changes
  15. ng-repo/src/commit.rs: 205 changes
  16. ng-repo/src/errors.rs: 1 change
  17. ng-repo/src/event.rs: 23 changes
  18. ng-repo/src/file.rs: 287 changes
  19. ng-repo/src/kcv_storage.rs: 107 changes
  20. ng-repo/src/lib.rs: 2 changes
  21. ng-repo/src/object.rs: 113 changes
  22. ng-repo/src/repo.rs: 457 changes
  23. ng-repo/src/store.rs: 474 changes
  24. ng-repo/src/types.rs: 48 changes
  25. ng-storage-rocksdb/src/block_storage.rs: 1020 changes
  26. ng-storage-rocksdb/src/kcv_storage.rs: 464 changes
  27. ng-verifier/Cargo.toml: 7 changes
  28. ng-verifier/src/lib.rs: 2 changes
  29. ng-verifier/src/types.rs: 115 changes
  30. ng-verifier/src/verifier.rs: 309 changes

Cargo.lock (generated): 7 changes

@ -3223,10 +3223,12 @@ version = "0.1.0"
dependencies = [
"async-once-cell",
"async-std",
"async-trait",
"base64-url",
"ng-client-ws",
"ng-net",
"ng-repo",
"ng-storage-rocksdb",
"ng-verifier",
"ng-wallet",
"once_cell",
@ -3413,16 +3415,21 @@ dependencies = [
name = "ng-verifier"
version = "0.1.0"
dependencies = [
"async-std",
"automerge",
"blake3",
"chacha20",
"getrandom 0.2.10",
"ng-net",
"ng-repo",
"ng-storage-rocksdb",
"oxigraph",
"rand 0.7.3",
"serde",
"serde_bare",
"serde_bytes",
"threshold_crypto",
"web-time",
"yrs",
]

nextgraph/Cargo.toml: 4 changes
@ -31,6 +31,10 @@ web-time = "0.2.0"
async-std = { version = "1.12.0", features = [ "attributes", "unstable" ] }
zeroize = { version = "1.6.0", features = ["zeroize_derive"] }
serde_json = "1.0"
async-trait = "0.1.64"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ng-storage-rocksdb = { path = "../ng-storage-rocksdb", version = "0.1.0" }
[[example]]
name = "in_memory"

nextgraph/src/local_broker.rs: 165 changes
@ -8,13 +8,17 @@
// according to those terms.
use async_once_cell::OnceCell;
use async_std::sync::{Arc, RwLock};
use async_std::sync::{Arc, Mutex, RwLock};
use core::fmt;
use ng_net::connection::{ClientConfig, IConnect, StartConfig};
use ng_net::types::{ClientInfo, ClientType};
use ng_net::actor::EActor;
use ng_net::connection::{ClientConfig, IConnect, NoiseFSM, StartConfig};
use ng_net::errors::ProtocolError;
use ng_net::types::{ClientInfo, ClientType, ProtocolMessage};
use ng_net::utils::{Receiver, Sender};
use ng_repo::block_storage::HashMapBlockStorage;
use ng_repo::os_info::get_os_info;
use ng_verifier::types::*;
use ng_verifier::verifier::Verifier;
use ng_wallet::emojis::encode_pazzle;
use once_cell::sync::Lazy;
use serde_bare::to_vec;
@ -25,6 +29,7 @@ use std::path::PathBuf;
use zeroize::{Zeroize, ZeroizeOnDrop};
use ng_net::broker::*;
use ng_repo::block_storage::BlockStorage;
use ng_repo::errors::NgError;
use ng_repo::log::*;
use ng_repo::types::*;
@ -35,6 +40,8 @@ use ng_wallet::{create_wallet_v0, types::*};
use ng_client_ws::remote_ws::ConnectionWebSocket;
#[cfg(target_arch = "wasm32")]
use ng_client_ws::remote_ws_wasm::ConnectionWebSocket;
#[cfg(not(target_arch = "wasm32"))]
use ng_storage_rocksdb::block_storage::RocksDbBlockStorage;
type JsStorageReadFn = dyn Fn(String) -> Result<String, NgError> + 'static + Sync + Send;
type JsStorageWriteFn = dyn Fn(String, String) -> Result<(), NgError> + 'static + Sync + Send;
@ -173,6 +180,16 @@ impl LocalBrokerConfig {
_ => None,
}
}
fn compute_path(&self, dir: &String) -> Result<PathBuf, NgError> {
match self {
Self::BasePath(path) => {
let mut new_path = path.clone();
new_path.push(dir);
Ok(new_path)
}
_ => Err(NgError::InvalidArgument),
}
}
}
#[derive(Debug)]
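To make the new compute_path helper concrete, here is a minimal usage sketch, assuming the BasePath variant matched above; the path and directory values are illustrative stand-ins, not taken from this diff.
// Sketch only: BasePath is the variant matched above; values are illustrative.
let config = LocalBrokerConfig::BasePath(PathBuf::from("/home/alice/.ng"));
let dir = config
    .compute_path(&"client_id_hash".to_string()) // stand-in for client.id.to_hash_string()
    .unwrap();
// dir is now /home/alice/.ng/client_id_hash; any non-BasePath config
// (in-memory, JS storage) returns NgError::InvalidArgument instead.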
@ -296,13 +313,24 @@ impl SessionConfig {
// }
// }
struct OpenedWallet {
wallet: SensitiveWallet,
block_storage: Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>,
}
impl fmt::Debug for OpenedWallet {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "OpenedWallet.\nwallet {:?}", self.wallet)
}
}
#[derive(Debug)]
struct LocalBroker {
pub config: LocalBrokerConfig,
pub wallets: HashMap<String, LocalWalletStorageV0>,
pub opened_wallets: HashMap<String, SensitiveWallet>,
pub opened_wallets: HashMap<String, OpenedWallet>,
pub sessions: HashMap<UserId, SessionPeerStorageV0>,
@ -311,7 +339,31 @@ struct LocalBroker {
pub opened_sessions_list: Vec<Option<Session>>,
}
impl ILocalBroker for LocalBroker {}
// used to deliver events to the verifier on Clients, or on Core nodes that have Verifiers attached.
#[async_trait::async_trait]
impl ILocalBroker for LocalBroker {
async fn deliver(&mut self, event: Event) {}
}
// this is used if an Actor does a BROKER.local_broker.respond
// it happens when a remote peer is doing a request on the verifier
#[async_trait::async_trait]
impl EActor for LocalBroker {
async fn respond(
&mut self,
msg: ProtocolMessage,
fsm: Arc<Mutex<NoiseFSM>>,
) -> Result<(), ProtocolError> {
// search opened_sessions by user_id of fsm
let session = match fsm.lock().await.user_id() {
Some(user) => self
.get_mut_session_for_user(&user)
.ok_or(ProtocolError::ActorError)?,
None => return Err(ProtocolError::ActorError),
};
session.verifier.respond(msg, fsm).await
}
}
impl LocalBroker {
fn storage_path_for_user(&self, user_id: &UserId) -> Option<PathBuf> {
@ -325,6 +377,13 @@ impl LocalBroker {
}
}
fn get_mut_session_for_user(&mut self, user: &UserId) -> Option<&mut Session> {
match self.opened_sessions.get(user) {
Some(idx) => self.opened_sessions_list[*idx as usize].as_mut(),
None => None,
}
}
fn verifier_config_type_from_session_config(
&self,
config: &SessionConfig,
@ -356,12 +415,13 @@ impl LocalBroker {
let session = self.opened_sessions_list[*session_idx as usize]
.as_mut()
.ok_or(NgError::SessionNotFound)?;
let wallet = match &session.config {
let wallet = &match &session.config {
SessionConfig::V0(v0) => self
.opened_wallets
.get(&v0.wallet_name)
.ok_or(NgError::WalletNotFound),
}?;
}?
.wallet;
Ok((wallet, session))
}
@ -426,7 +486,13 @@ async fn init_(config: LocalBrokerConfig) -> Result<Arc<RwLock<LocalBroker>>, Ng
};
//log_debug!("{:?}", &local_broker);
Ok(Arc::new(RwLock::new(local_broker)))
let broker = Arc::new(RwLock::new(local_broker));
BROKER.write().await.set_local_broker(Arc::clone(
&(Arc::clone(&broker) as Arc<RwLock<dyn ILocalBroker>>),
));
Ok(broker)
}
#[doc(hidden)]
@ -671,20 +737,54 @@ pub async fn wallet_was_opened(mut wallet: SensitiveWallet) -> Result<ClientV0,
if broker.opened_wallets.get(&wallet.id()).is_some() {
return Err(NgError::WalletAlreadyOpened);
}
let wallet_id = wallet.id();
match broker.wallets.get(&(wallet.id())) {
let lws = match broker.wallets.get(&wallet_id) {
Some(lws) => {
if wallet.client().is_none() {
// this case happens when the wallet is opened and not when it is imported (as the client is already there)
wallet.set_client(lws.to_client_v0(wallet.privkey())?);
}
lws
}
None => {
return Err(NgError::WalletNotFound);
}
}
};
let block_storage = if lws.in_memory {
Arc::new(std::sync::RwLock::new(HashMapBlockStorage::new()))
as Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync + 'static>>
} else {
#[cfg(not(target_family = "wasm"))]
{
let mut key_material = wallet
.client()
.as_ref()
.unwrap()
.sensitive_client_storage
.priv_key
.slice();
let path = broker
.config
.compute_path(&wallet.client().as_ref().unwrap().id.to_hash_string())?;
let mut key: [u8; 32] =
derive_key("NextGraph Client BlockStorage BLAKE3 key", key_material);
Arc::new(std::sync::RwLock::new(RocksDbBlockStorage::open(
&path, key,
)?)) as Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync + 'static>>
}
#[cfg(target_family = "wasm")]
{
panic!("no RocksDB in WASM");
}
};
let client = wallet.client().as_ref().unwrap().clone();
broker.opened_wallets.insert(wallet.id(), wallet);
let opened_wallet = OpenedWallet {
wallet,
block_storage,
};
broker.opened_wallets.insert(wallet_id, opened_wallet);
Ok(client)
}
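The 32-byte RocksDB encryption key above comes from derive_key over the client's sensitive storage key. A minimal sketch of that step, assuming derive_key here wraps the BLAKE3 key-derivation function (the context string is the one used in this commit; the key material below is a placeholder, not a real client key):
// Sketch: BLAKE3 KDF with the context string used in this commit.
let key_material: [u8; 32] = [0u8; 32]; // placeholder for sensitive_client_storage.priv_key bytes
let key: [u8; 32] = blake3::derive_key(
    "NextGraph Client BlockStorage BLAKE3 key",
    &key_material,
);
// `key` is then handed to RocksDbBlockStorage::open(&path, key).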
@ -706,14 +806,16 @@ pub async fn session_start(mut config: SessionConfig) -> Result<SessionInfo, NgE
match broker.opened_wallets.get(&wallet_name) {
None => return Err(NgError::WalletNotFound),
Some(wallet) => {
let credentials = match wallet.individual_site(&user_id) {
Some(opened_wallet) => {
let block_storage = Arc::clone(&opened_wallet.block_storage);
let credentials = match opened_wallet.wallet.individual_site(&user_id) {
Some(creds) => creds.clone(),
None => return Err(NgError::NotFound),
};
let client_storage_master_key = serde_bare::to_vec(
&wallet
&opened_wallet
.wallet
.client()
.as_ref()
.unwrap()
@ -744,7 +846,7 @@ pub async fn session_start(mut config: SessionConfig) -> Result<SessionInfo, NgE
let decoded = base64_url::decode(&string)
.map_err(|_| NgError::SerializationError)?;
Some(SessionWalletStorageV0::dec_session(
wallet.privkey(),
opened_wallet.wallet.privkey(),
&decoded,
)?)
}
@ -759,7 +861,7 @@ pub async fn session_start(mut config: SessionConfig) -> Result<SessionInfo, NgE
let res = read(path);
if res.is_ok() {
Some(SessionWalletStorageV0::dec_session(
wallet.privkey(),
opened_wallet.wallet.privkey(),
&res.unwrap(),
)?)
} else {
@ -832,13 +934,16 @@ pub async fn session_start(mut config: SessionConfig) -> Result<SessionInfo, NgE
key_material.as_slice(),
);
key_material.zeroize();
let verifier = Verifier::new(VerifierConfig {
config_type: broker.verifier_config_type_from_session_config(&config),
user_master_key: key,
peer_priv_key: session.peer_key.clone(),
user_priv_key: credentials.0,
private_store_read_cap: credentials.1,
})?;
let verifier = Verifier::new(
VerifierConfig {
config_type: broker.verifier_config_type_from_session_config(&config),
user_master_key: key,
peer_priv_key: session.peer_key.clone(),
user_priv_key: credentials.0,
private_store_read_cap: credentials.1,
},
block_storage,
)?;
key.zeroize();
broker.opened_sessions_list.push(Some(Session {
config,
@ -919,7 +1024,7 @@ pub async fn user_connect_with_device_info(
location: Option<String>,
) -> Result<Vec<(String, String, String, Option<String>, f64)>, NgError> {
//FIXME: release this write lock much sooner than at the end of the loop of all tries to connect to some servers ?
// or maybe it is good to block as we dont want concurrent connection attemps potentially to the same server
// or maybe it is good to block as we dont want concurrent connection attempts potentially to the same server
let mut local_broker = match LOCAL_BROKER.get() {
None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
Some(Ok(broker)) => broker.write().await,
@ -1084,8 +1189,8 @@ pub async fn wallet_close(wallet_name: &String) -> Result<(), NgError> {
};
match broker.opened_wallets.remove(wallet_name) {
Some(mut wallet) => {
for user in wallet.sites() {
Some(mut opened_wallet) => {
for user in opened_wallet.wallet.sites() {
let key: PubKey = (user.as_str()).try_into().unwrap();
match broker.opened_sessions.remove(&key) {
Some(id) => {
@ -1094,7 +1199,7 @@ pub async fn wallet_close(wallet_name: &String) -> Result<(), NgError> {
None => {}
}
}
wallet.zeroize();
opened_wallet.wallet.zeroize();
}
None => return Err(NgError::WalletNotFound),
}
@ -1123,15 +1228,15 @@ pub async fn doc_fetch(
nuri: String,
payload: Option<AppRequestPayload>,
) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
let broker = match LOCAL_BROKER.get() {
let mut broker = match LOCAL_BROKER.get() {
None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
Some(Ok(broker)) => broker.read().await,
Some(Ok(broker)) => broker.write().await,
};
if session_id as usize >= broker.opened_sessions_list.len() {
return Err(NgError::InvalidArgument);
}
let session = broker.opened_sessions_list[session_id as usize]
.as_ref()
.as_mut()
.ok_or(NgError::SessionNotFound)?;
session.verifier.doc_fetch(nuri, payload)

ng-broker/src/broker_storage/account.rs: 36 changes
@ -60,7 +60,13 @@ impl<'a> Account<'a> {
if acc.exists() {
return Err(StorageError::AlreadyExists);
}
store.put(Self::PREFIX_ACCOUNT, &to_vec(&id)?, None, to_vec(&admin)?)?;
store.put(
Self::PREFIX_ACCOUNT,
&to_vec(&id)?,
None,
&to_vec(&admin)?,
&None,
)?;
Ok(acc)
}
@ -71,7 +77,9 @@ impl<'a> Account<'a> {
) -> Result<Vec<UserId>, StorageError> {
let size = to_vec(&UserId::nil())?.len();
let mut res: Vec<UserId> = vec![];
for user in store.get_all_keys_and_values(Self::PREFIX_ACCOUNT, size, vec![], None)? {
for user in
store.get_all_keys_and_values(Self::PREFIX_ACCOUNT, size, vec![], None, &None)?
{
let admin: bool = from_slice(&user.1)?;
if admin == admins {
let id: UserId = from_slice(&user.0[1..user.0.len()])?;
@ -82,7 +90,12 @@ impl<'a> Account<'a> {
}
pub fn exists(&self) -> bool {
self.store
.get(Self::PREFIX_ACCOUNT, &to_vec(&self.id).unwrap(), None)
.get(
Self::PREFIX_ACCOUNT,
&to_vec(&self.id).unwrap(),
None,
&None,
)
.is_ok()
}
pub fn id(&self) -> UserId {
@ -106,10 +119,10 @@ impl<'a> Account<'a> {
let mut id_and_client = to_vec(&self.id)?;
id_and_client.append(&mut client_key_ser);
if tx
.has_property_value(Self::PREFIX_CLIENT, &id_and_client, None, &vec![])
.has_property_value(Self::PREFIX_CLIENT, &id_and_client, None, &vec![], &None)
.is_err()
{
tx.put(Self::PREFIX_CLIENT, &id_and_client, None, &vec![])?;
tx.put(Self::PREFIX_CLIENT, &id_and_client, None, &vec![], &None)?;
}
if tx
.has_property_value(
@ -117,6 +130,7 @@ impl<'a> Account<'a> {
&id_and_client,
Some(Self::INFO),
&info_ser,
&None,
)
.is_err()
{
@ -125,6 +139,7 @@ impl<'a> Account<'a> {
&id_and_client,
Some(Self::INFO),
&info_ser,
&None,
)?;
}
let now = SystemTime::now()
@ -136,6 +151,7 @@ impl<'a> Account<'a> {
&id_and_client,
Some(Self::LAST_SEEN),
&to_vec(&now)?,
&None,
)?;
Ok(())
})
@ -187,6 +203,7 @@ impl<'a> Account<'a> {
&to_vec(&self.id)?,
None,
&to_vec(&true)?,
&None,
)
.is_ok()
{
@ -206,17 +223,20 @@ impl<'a> Account<'a> {
let mut client_key_ser = to_vec(&client_key)?;
let size = client_key_ser.len() + id.len();
if let Ok(clients) = tx.get_all_keys_and_values(Self::PREFIX_CLIENT, size, id, None) {
if let Ok(clients) =
tx.get_all_keys_and_values(Self::PREFIX_CLIENT, size, id, None, &None)
{
for client in clients {
tx.del(Self::PREFIX_CLIENT, &client.0, None)?;
tx.del(Self::PREFIX_CLIENT, &client.0, None, &None)?;
tx.del_all(
Self::PREFIX_CLIENT_PROPERTY,
&client.0,
&Self::ALL_CLIENT_PROPERTIES,
&None,
)?;
}
}
tx.del(Self::PREFIX_ACCOUNT, &to_vec(&self.id)?, None)?;
tx.del(Self::PREFIX_ACCOUNT, &to_vec(&self.id)?, None, &None)?;
Ok(())
})
}
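Every call in this file gains a trailing &None argument. Together with the StorageError::UnknownColumnFamily variant added in errors.rs further down, this reads as an optional column-family selector threaded through the key-value-column storage API. A hedged sketch of the assumed shape of the updated get signature (the trait name, parameter name, and String payload are guesses, not confirmed by this diff):
// Assumed shape only: the real definitions live in ng-repo/src/kcv_storage.rs,
// and the `family` name and String payload are guesses.
pub trait ReadTransaction {
    fn get(
        &self,
        prefix: u8,
        key: &Vec<u8>,
        suffix: Option<u8>,
        family: &Option<String>, // None selects the default column family
    ) -> Result<Vec<u8>, StorageError>;
}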

ng-broker/src/broker_storage/config.rs: 6 changes
@ -75,7 +75,8 @@ impl<'a> Config<'a> {
Self::PREFIX,
&to_vec(&Self::KEY)?,
Some(Self::MODE),
to_vec(&mode)?,
&to_vec(&mode)?,
&None,
)?;
Ok(acc)
}
@ -85,13 +86,14 @@ impl<'a> Config<'a> {
Self::PREFIX,
&to_vec(&Self::KEY).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
&None,
)
.is_ok()
}
pub fn mode(&self) -> Result<ConfigMode, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&Self::KEY)?, Some(Self::MODE))
.get(Self::PREFIX, &to_vec(&Self::KEY)?, Some(Self::MODE), &None)
{
Ok(ver) => Ok(from_slice::<ConfigMode>(&ver)?),
Err(e) => Err(e),

ng-broker/src/broker_storage/invitation.rs: 26 changes
@ -75,7 +75,13 @@ impl<'a> Invitation<'a> {
}
let mut value = to_vec(&(code_type, expiry, memo.clone()))?;
store.write_transaction(&mut |tx| {
tx.put(Self::PREFIX, &to_vec(code)?, Some(Self::TYPE), &value)?;
tx.put(
Self::PREFIX,
&to_vec(code)?,
Some(Self::TYPE),
&value,
&None,
)?;
Ok(())
})?;
Ok(acc)
@ -94,7 +100,7 @@ impl<'a> Invitation<'a> {
unique = true;
multi = true;
}
for invite in store.get_all_keys_and_values(Self::PREFIX, size, vec![], None)? {
for invite in store.get_all_keys_and_values(Self::PREFIX, size, vec![], None, &None)? {
if invite.0.len() == size + 2 {
let code: [u8; 32] = from_slice(&invite.0[1..invite.0.len() - 1])?;
if invite.0[size + 1] == Self::TYPE {
@ -138,6 +144,7 @@ impl<'a> Invitation<'a> {
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
&None,
)
.is_ok()
}
@ -148,7 +155,7 @@ impl<'a> Invitation<'a> {
pub fn get_type(&self) -> Result<u8, ProtocolError> {
let type_ser = self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE))?;
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE), &None)?;
let t: (u8, u32, Option<String>) = from_slice(&type_ser)?;
// if t.1 < now_timestamp() {
// return Err(ProtocolError::Expired);
@ -157,9 +164,9 @@ impl<'a> Invitation<'a> {
}
pub fn is_expired(&self) -> Result<bool, StorageError> {
let expire_ser = self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE))?;
let expire_ser =
self.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE), &None)?;
let expire: (u8, u32, Option<String>) = from_slice(&expire_ser)?;
if expire.1 < now_timestamp() {
return Ok(true);
@ -169,7 +176,12 @@ impl<'a> Invitation<'a> {
pub fn del(&self) -> Result<(), StorageError> {
self.store.write_transaction(&mut |tx| {
tx.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)?;
tx.del_all(
Self::PREFIX,
&to_vec(&self.id)?,
&Self::ALL_PROPERTIES,
&None,
)?;
Ok(())
})
}

ng-broker/src/broker_storage/overlay.rs: 35 changes
@ -79,6 +79,7 @@ impl<'a> Overlay<'a> {
&to_vec(&id)?,
Some(Self::SECRET),
&to_vec(&secret)?,
&None,
)?;
if repo.is_some() {
tx.put(
@ -86,6 +87,7 @@ impl<'a> Overlay<'a> {
&to_vec(&id)?,
Some(Self::REPO),
&to_vec(&repo.unwrap())?,
&None,
)?;
}
let meta = OverlayMeta {
@ -97,6 +99,7 @@ impl<'a> Overlay<'a> {
&to_vec(&id)?,
Some(Self::META),
&to_vec(&meta)?,
&None,
)?;
Ok(())
})?;
@ -108,6 +111,7 @@ impl<'a> Overlay<'a> {
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
&None,
)
.is_ok()
}
@ -122,7 +126,8 @@ impl<'a> Overlay<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::PEER),
to_vec(peer)?,
&to_vec(peer)?,
&None,
)
}
pub fn remove_peer(&self, peer: &PeerId) -> Result<(), StorageError> {
@ -130,7 +135,8 @@ impl<'a> Overlay<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::PEER),
to_vec(peer)?,
&to_vec(peer)?,
&None,
)
}
@ -140,6 +146,7 @@ impl<'a> Overlay<'a> {
&to_vec(&self.id)?,
Some(Self::PEER),
&to_vec(peer)?,
&None,
)
}
@ -151,7 +158,8 @@ impl<'a> Overlay<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::TOPIC),
to_vec(topic)?,
&to_vec(topic)?,
&None,
)
}
pub fn remove_topic(&self, topic: &TopicId) -> Result<(), StorageError> {
@ -159,7 +167,8 @@ impl<'a> Overlay<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::TOPIC),
to_vec(topic)?,
&to_vec(topic)?,
&None,
)
}
@ -169,13 +178,14 @@ impl<'a> Overlay<'a> {
&to_vec(&self.id)?,
Some(Self::TOPIC),
&to_vec(topic)?,
&None,
)
}
pub fn secret(&self) -> Result<SymKey, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::SECRET))
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::SECRET), &None)
{
Ok(secret) => Ok(from_slice::<SymKey>(&secret)?),
Err(e) => Err(e),
@ -185,7 +195,7 @@ impl<'a> Overlay<'a> {
pub fn metadata(&self) -> Result<OverlayMeta, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META))
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META), &None)
{
Ok(meta) => Ok(from_slice::<OverlayMeta>(&meta)?),
Err(e) => Err(e),
@ -199,14 +209,15 @@ impl<'a> Overlay<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::META),
to_vec(meta)?,
&to_vec(meta)?,
&None,
)
}
pub fn repo(&self) -> Result<PubKey, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::REPO))
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::REPO), &None)
{
Ok(repo) => Ok(from_slice::<PubKey>(&repo)?),
Err(e) => Err(e),
@ -214,7 +225,11 @@ impl<'a> Overlay<'a> {
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
self.store.del_all(
Self::PREFIX,
&to_vec(&self.id)?,
&Self::ALL_PROPERTIES,
&None,
)
}
}

ng-broker/src/broker_storage/peer.rs: 23 changes
@ -77,12 +77,14 @@ impl<'a> Peer<'a> {
&to_vec(&id)?,
Some(Self::VERSION),
&to_vec(&advert.version())?,
&None,
)?;
tx.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::ADVERT),
&to_vec(&advert)?,
&None,
)?;
Ok(())
})?;
@ -94,6 +96,7 @@ impl<'a> Peer<'a> {
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
&None,
)
.is_ok()
}
@ -103,7 +106,7 @@ impl<'a> Peer<'a> {
pub fn version(&self) -> Result<u32, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::VERSION))
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::VERSION), &None)
{
Ok(ver) => Ok(from_slice::<u32>(&ver)?),
Err(e) => Err(e),
@ -117,7 +120,8 @@ impl<'a> Peer<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::VERSION),
to_vec(&version)?,
&to_vec(&version)?,
&None,
)
}
pub fn update_advert(&self, advert: &PeerAdvert) -> Result<(), StorageError> {
@ -134,12 +138,14 @@ impl<'a> Peer<'a> {
&to_vec(&self.id)?,
Some(Self::VERSION),
&to_vec(&advert.version())?,
&None,
)?;
tx.replace(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::ADVERT),
&to_vec(&advert)?,
&None,
)?;
Ok(())
})
@ -147,7 +153,7 @@ impl<'a> Peer<'a> {
pub fn advert(&self) -> Result<PeerAdvert, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT))
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT), &None)
{
Ok(advert) => Ok(from_slice::<PeerAdvert>(&advert)?),
Err(e) => Err(e),
@ -161,12 +167,17 @@ impl<'a> Peer<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::ADVERT),
to_vec(advert)?,
&to_vec(advert)?,
&None,
)
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
self.store.del_all(
Self::PREFIX,
&to_vec(&self.id)?,
&Self::ALL_PROPERTIES,
&None,
)
}
}

ng-broker/src/broker_storage/topic.rs: 24 changes
@ -63,7 +63,8 @@ impl<'a> Topic<'a> {
Self::PREFIX,
&to_vec(&id)?,
Some(Self::META),
to_vec(&meta)?,
&to_vec(&meta)?,
&None,
)?;
Ok(acc)
}
@ -73,6 +74,7 @@ impl<'a> Topic<'a> {
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
&None,
)
.is_ok()
}
@ -87,7 +89,8 @@ impl<'a> Topic<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::HEAD),
to_vec(head)?,
&to_vec(head)?,
&None,
)
}
pub fn remove_head(&self, head: &ObjectId) -> Result<(), StorageError> {
@ -95,7 +98,8 @@ impl<'a> Topic<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::HEAD),
to_vec(head)?,
&to_vec(head)?,
&None,
)
}
@ -105,13 +109,14 @@ impl<'a> Topic<'a> {
&to_vec(&self.id)?,
Some(Self::HEAD),
&to_vec(head)?,
&None,
)
}
pub fn metadata(&self) -> Result<TopicMeta, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META))
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META), &None)
{
Ok(meta) => Ok(from_slice::<TopicMeta>(&meta)?),
Err(e) => Err(e),
@ -125,12 +130,17 @@ impl<'a> Topic<'a> {
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::META),
to_vec(meta)?,
&to_vec(meta)?,
&None,
)
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
self.store.del_all(
Self::PREFIX,
&to_vec(&self.id)?,
&Self::ALL_PROPERTIES,
&None,
)
}
}

ng-broker/src/broker_storage/wallet.rs: 6 changes
@ -47,7 +47,7 @@ impl<'a> Wallet<'a> {
) -> Result<SymKey, StorageError> {
let mut result: Option<SymKey> = None;
self.store.write_transaction(&mut |tx| {
let got = tx.get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK));
let got = tx.get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK), &None);
match got {
Err(e) => {
if e == StorageError::NotFound {
@ -86,12 +86,12 @@ impl<'a> Wallet<'a> {
) -> Result<SymKey, StorageError> {
let symkey = SymKey::random();
let vec = symkey.slice().to_vec();
tx.put(prefix, key, Some(Self::SYM_KEY), &vec)?;
tx.put(prefix, key, Some(Self::SYM_KEY), &vec, &None)?;
Ok(symkey)
}
pub fn exists_single_key(&self, prefix: u8, key: &Vec<u8>) -> bool {
self.store
.get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK))
.get(prefix, key, Some(Self::SUFFIX_FOR_EXIST_CHECK), &None)
.is_ok()
}

ng-net/src/broker.rs: 56 changes
@ -11,6 +11,7 @@
//! Broker singleton present in every instance of NextGraph (Client, Server, Core node)
use crate::actor::EActor;
use crate::connection::*;
use crate::errors::*;
use crate::server_storage::ServerStorage;
@ -66,7 +67,17 @@ pub struct ServerConfig {
pub bootstrap: BootstrapContent,
}
pub trait ILocalBroker: Send + Sync {}
/*pub trait EActor: Send + Sync + std::fmt::Debug {
async fn respond(
&mut self,
msg: ProtocolMessage,
fsm: Arc<Mutex<NoiseFSM>>,
) -> Result<(), ProtocolError>;
}*/
#[async_trait::async_trait]
pub trait ILocalBroker: Send + Sync + EActor {
async fn deliver(&mut self, event: Event);
}
pub static BROKER: Lazy<Arc<RwLock<Broker>>> = Lazy::new(|| Arc::new(RwLock::new(Broker::new())));
@ -88,7 +99,8 @@ pub struct Broker<'a> {
tauri_streams: HashMap<String, Sender<Commit>>,
disconnections_sender: Sender<String>,
disconnections_receiver: Option<Receiver<String>>,
local_broker: Option<Box<dyn ILocalBroker + Send + Sync + 'a>>,
//local_broker: Option<Box<dyn ILocalBroker + Send + Sync + 'a>>,
local_broker: Option<Arc<RwLock<dyn ILocalBroker + 'a>>>,
}
impl<'a> Broker<'a> {
@ -148,9 +160,9 @@ impl<'a> Broker<'a> {
self.server_storage = Some(Box::new(storage));
}
pub fn set_local_broker(&mut self, broker: impl ILocalBroker + 'a) {
pub fn set_local_broker(&mut self, broker: Arc<RwLock<dyn ILocalBroker + 'a>>) {
//log_debug!("set_local_broker");
self.local_broker = Some(Box::new(broker));
self.local_broker = Some(broker);
}
pub fn set_server_config(&mut self, config: ServerConfig) {
@ -183,12 +195,11 @@ impl<'a> Broker<'a> {
.as_ref()
.ok_or(ProtocolError::BrokerError)
}
pub fn get_local_broker_mut(
&mut self,
) -> Result<&mut Box<dyn ILocalBroker + Send + Sync + 'a>, NgError> {
//log_debug!("GET STORAGE {:?}", self.server_storage);
self.local_broker.as_mut().ok_or(NgError::BrokerError)
//Option<Arc<RwLock<dyn ILocalBroker>>>,
pub fn get_local_broker(&self) -> Result<Arc<RwLock<dyn ILocalBroker + 'a>>, NgError> {
Ok(Arc::clone(
self.local_broker.as_ref().ok_or(NgError::BrokerError)?,
))
}
#[cfg(not(target_arch = "wasm32"))]
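Since the local broker is now held as Arc<RwLock<dyn ILocalBroker>>, callers clone the shared handle and take the lock themselves instead of borrowing a Box. A minimal usage sketch (inside an async fn, given some `event: Event`):
// Sketch: deliver an event through the shared handle returned above.
let local_broker = BROKER
    .read()
    .await
    .get_local_broker()
    .expect("no local broker set");
local_broker.write().await.deliver(event).await;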
@ -340,18 +351,19 @@ impl<'a> Broker<'a> {
nuri: String,
obj_ref: ObjectRef,
) -> Result<ObjectContent, ProtocolError> {
let blockstream = self
.get_block_from_store_with_block_id(nuri, obj_ref.id, true)
.await?;
let store = Box::new(HashMapBlockStorage::from_block_stream(blockstream).await);
Object::load(obj_ref.id, Some(obj_ref.key), &store)
.map_err(|e| match e {
ObjectParseError::MissingBlocks(_missing) => ProtocolError::MissingBlocks,
_ => ProtocolError::ObjectParseError,
})?
.content()
.map_err(|_| ProtocolError::ObjectParseError)
unimplemented!();
// let blockstream = self
// .get_block_from_store_with_block_id(nuri, obj_ref.id, true)
// .await?;
// let store = Box::new(HashMapBlockStorage::from_block_stream(blockstream).await);
// Object::load(obj_ref.id, Some(obj_ref.key), &store)
// .map_err(|e| match e {
// ObjectParseError::MissingBlocks(_missing) => ProtocolError::MissingBlocks,
// _ => ProtocolError::ObjectParseError,
// })?
// .content()
// .map_err(|_| ProtocolError::ObjectParseError)
}
pub async fn doc_sync_branch(&mut self, anuri: String) -> (Receiver<Commit>, Sender<Commit>) {

ng-net/src/connection.rs: 9 changes
@ -31,7 +31,7 @@ use async_std::sync::Mutex;
use either::Either;
use futures::{channel::mpsc, select, FutureExt, SinkExt};
use ng_repo::log::*;
use ng_repo::types::{DirectPeerId, PrivKey, PubKey, X25519PrivKey};
use ng_repo::types::{DirectPeerId, PrivKey, PubKey, UserId, X25519PrivKey};
use ng_repo::utils::{sign, verify};
use noise_protocol::{patterns::noise_xk, CipherState, HandshakeState};
use noise_rust_crypto::*;
@ -255,6 +255,13 @@ impl NoiseFSM {
}
}
pub fn user_id(&self) -> Option<UserId> {
match &self.config {
Some(start_config) => start_config.get_user(),
_ => None,
}
}
fn decrypt(&mut self, ciphertext: &Noise) -> Result<ProtocolMessage, ProtocolError> {
let ser = self
.noise_cipher_state_dec

ng-repo/src/block_storage.rs: 18 changes
@ -23,13 +23,13 @@ use std::{
pub trait BlockStorage: Send + Sync {
/// Load a block from the storage.
fn get(&self, id: &BlockId) -> Result<Block, StorageError>;
fn get(&self, overlay: &OverlayId, id: &BlockId) -> Result<Block, StorageError>;
/// Save a block to the storage.
fn put(&self, block: &Block) -> Result<BlockId, StorageError>;
fn put(&self, overlay: &OverlayId, block: &Block) -> Result<BlockId, StorageError>;
/// Delete a block from the storage.
fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError>;
fn del(&self, overlay: &OverlayId, id: &BlockId) -> Result<usize, StorageError>;
/// number of Blocks in the storage
fn len(&self) -> Result<usize, StorageError>;
@ -91,10 +91,10 @@ impl HashMapBlockStorage {
}
}
pub async fn from_block_stream(mut blockstream: Receiver<Block>) -> Self {
pub async fn from_block_stream(overlay: &OverlayId, mut blockstream: Receiver<Block>) -> Self {
let this = Self::new();
while let Some(block) = blockstream.next().await {
this.put(&block).unwrap();
this.put(overlay, &block).unwrap();
}
this
}
@ -114,7 +114,7 @@ impl HashMapBlockStorage {
}
impl BlockStorage for HashMapBlockStorage {
fn get(&self, id: &BlockId) -> Result<Block, StorageError> {
fn get(&self, overlay: &OverlayId, id: &BlockId) -> Result<Block, StorageError> {
match self.blocks.read().unwrap().get(id) {
Some(block) => {
let mut b = block.clone();
@ -133,7 +133,7 @@ impl BlockStorage for HashMapBlockStorage {
Ok(self.get_len())
}
fn put(&self, block: &Block) -> Result<BlockId, StorageError> {
fn put(&self, overlay: &OverlayId, block: &Block) -> Result<BlockId, StorageError> {
let id = block.id();
//log_debug!("PUTTING {}", id);
let mut b = block.clone();
@ -142,7 +142,7 @@ impl BlockStorage for HashMapBlockStorage {
Ok(id)
}
fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError> {
fn del(&self, overlay: &OverlayId, id: &BlockId) -> Result<usize, StorageError> {
let block = self
.blocks
.write()
@ -150,6 +150,6 @@ impl BlockStorage for HashMapBlockStorage {
.remove(id)
.ok_or(StorageError::NotFound)?;
let size = size_of_val(&block);
Ok((block, size))
Ok(size)
}
}
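The BlockStorage trait now scopes every operation to an overlay, and del returns only the freed size instead of the removed Block. A short sketch of the new call pattern (given some `block: Block`; OverlayId::dummy() is the dummy constructor used elsewhere in this commit):
// Sketch: one storage instance can now hold blocks for several overlays.
let storage = HashMapBlockStorage::new();
let overlay = OverlayId::dummy();
let id = storage.put(&overlay, &block).unwrap(); // save under this overlay
let loaded = storage.get(&overlay, &id).unwrap(); // load it back
let freed: usize = storage.del(&overlay, &id).unwrap(); // freed size only, no Block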

ng-repo/src/branch.rs: 128 changes
@ -17,6 +17,7 @@ use zeroize::{Zeroize, ZeroizeOnDrop};
use crate::block_storage::*;
use crate::errors::*;
use crate::object::*;
use crate::store::Store;
use crate::types::*;
use crate::utils::encrypt_in_place;
@ -97,7 +98,7 @@ impl Branch {
target_heads: &[ObjectId],
known_heads: &[ObjectId],
//their_filter: &BloomFilter,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> Result<Vec<ObjectId>, ObjectParseError> {
//log_debug!(">> sync_req");
//log_debug!(" target_heads: {:?}", target_heads);
@ -108,7 +109,7 @@ impl Branch {
/// optionally collecting the missing objects/blocks that couldn't be found locally on the way
fn load_causal_past(
cobj: &Object,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
theirs: &HashSet<ObjectId>,
visited: &mut HashSet<ObjectId>,
missing: &mut Option<&mut HashSet<ObjectId>>,
@ -179,26 +180,27 @@ mod test {
//use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership};
struct Test<'a> {
storage: Box<dyn BlockStorage + Send + Sync + 'a>,
}
impl<'a> Test<'a> {
fn storage(s: impl BlockStorage + 'a) -> Self {
Test {
storage: Box::new(s),
}
}
fn s(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
&self.storage
}
}
// struct Test<'a> {
// storage: Box<dyn BlockStorage + Send + Sync + 'a>,
// }
// impl<'a> Test<'a> {
// fn storage(s: impl BlockStorage + 'a) -> Self {
// Test {
// storage: Box::new(s),
// }
// }
// fn s(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
// &self.storage
// }
// }
use crate::branch::*;
use crate::repo::Repo;
use crate::log::*;
use crate::store::Store;
use crate::utils::*;
#[test]
@ -206,18 +208,10 @@ mod test {
fn add_obj(
content: ObjectContentV0,
header: Option<CommitHeader>,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> ObjectRef {
let max_object_size = 4000;
let mut obj = Object::new(
ObjectContent::V0(content),
header,
max_object_size,
store_pubkey,
store_secret,
);
let mut obj = Object::new(ObjectContent::V0(content), header, max_object_size, store);
log_debug!(">>> add_obj");
log_debug!(" id: {:?}", obj.id());
log_debug!(" header: {:?}", obj.header());
@ -233,16 +227,14 @@ mod test {
deps: Vec<ObjectRef>,
acks: Vec<ObjectRef>,
body_ref: ObjectRef,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> ObjectRef {
let header = CommitHeader::new_with_deps_and_acks(
deps.iter().map(|r| r.id).collect(),
acks.iter().map(|r| r.id).collect(),
);
let overlay = store_pubkey.overlay_id_for_read_purpose();
let overlay = store.get_store_repo().overlay_id_for_read_purpose();
let obj_ref = ObjectRef {
id: ObjectId::Blake3Digest32([1; 32]),
@ -268,57 +260,34 @@ mod test {
)
.unwrap();
//log_debug!("commit: {:?}", commit);
add_obj(
ObjectContentV0::Commit(Commit::V0(commit)),
header,
store_pubkey,
store_secret,
store,
)
add_obj(ObjectContentV0::Commit(Commit::V0(commit)), header, store)
}
fn add_body_branch(
branch: BranchV0,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl BlockStorage + ?Sized>,
) -> ObjectRef {
fn add_body_branch(branch: BranchV0, store: &Store) -> ObjectRef {
let body: CommitBodyV0 = CommitBodyV0::Branch(Branch::V0(branch));
//log_debug!("body: {:?}", body);
add_obj(
ObjectContentV0::CommitBody(CommitBody::V0(body)),
None,
store_pubkey,
store_secret,
store,
)
}
fn add_body_trans(
header: Option<CommitHeader>,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl BlockStorage + ?Sized>,
) -> ObjectRef {
fn add_body_trans(header: Option<CommitHeader>, store: &Store) -> ObjectRef {
let content = [7u8; 777].to_vec();
let body = CommitBodyV0::AsyncTransaction(Transaction::V0(content));
//log_debug!("body: {:?}", body);
add_obj(
ObjectContentV0::CommitBody(CommitBody::V0(body)),
header,
store_pubkey,
store_secret,
store,
)
}
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
// repo
let (repo_privkey, repo_pubkey) = generate_keypair();
let (store_repo, repo_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_with_key(repo_pubkey);
// branch
@ -332,8 +301,8 @@ mod test {
&repo_pubkey,
&member_pubkey,
&[PermissionV0::WriteAsync],
store_repo.overlay_id_for_read_purpose(),
t.s(),
store.get_store_repo().overlay_id_for_read_purpose(),
store,
);
let repo_ref = ObjectRef {
@ -369,14 +338,9 @@ mod test {
// commit bodies
let branch_body = add_body_branch(
branch.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
);
let branch_body = add_body_branch(branch.clone(), &repo.store);
let trans_body = add_body_trans(None, &store_repo, &repo_secret, repo.get_storage());
let trans_body = add_body_trans(None, &repo.store);
// create & add commits to store
@ -389,9 +353,7 @@ mod test {
vec![],
vec![],
branch_body.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
&repo.store,
);
log_debug!(">> t1");
@ -403,9 +365,7 @@ mod test {
vec![br.clone()],
vec![],
trans_body.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
&repo.store,
);
log_debug!(">> t2");
@ -417,9 +377,7 @@ mod test {
vec![br.clone()],
vec![],
trans_body.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
&repo.store,
);
// log_debug!(">> a3");
@ -445,9 +403,7 @@ mod test {
vec![t2.clone()],
vec![t1.clone()],
trans_body.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
&repo.store,
);
log_debug!(">> t5");
@ -459,9 +415,7 @@ mod test {
vec![t1.clone(), t2.clone()],
vec![t4.clone()],
trans_body.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
&repo.store,
);
log_debug!(">> a6");
@ -473,9 +427,7 @@ mod test {
vec![t4.clone()],
vec![],
trans_body.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
&repo.store,
);
log_debug!(">> a7");
@ -487,12 +439,10 @@ mod test {
vec![t4.clone()],
vec![],
trans_body.clone(),
&store_repo,
&repo_secret,
repo.get_storage(),
&repo.store,
);
let c7 = Commit::load(a7.clone(), repo.get_storage(), true).unwrap();
let c7 = Commit::load(a7.clone(), &repo.store, true).unwrap();
c7.verify(&repo).unwrap();
// let mut filter = Filter::new(FilterBuilder::new(10, 0.01));
@ -517,7 +467,7 @@ mod test {
&[t5.id, a6.id, a7.id],
&[t5.id],
//&their_commits,
repo.get_storage(),
&repo.store,
)
.unwrap();
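As the test above shows, the store_pubkey/store_secret/storage triple collapses into a single &Store that bundles the StoreRepo, its read capability, and the block storage. A condensed sketch of the resulting save/load pattern, using only constructors that appear in this diff (assuming some `content: ObjectContent` that serializes to a commit):
// Sketch: one Store value replaces the (store_repo, store_secret, storage) triple.
let store = Store::dummy_public_v0();
let obj = Object::new(content, None, 0, &store); // block size 0 picks the default, as in these tests
obj.save(&store).expect("save object");
let commit = Commit::load(obj.reference().unwrap(), &store, true).expect("load commit with body");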

ng-repo/src/commit.rs: 205 changes
@ -20,6 +20,7 @@ use crate::errors::*;
use crate::log::*;
use crate::object::*;
use crate::repo::Repo;
use crate::store::Store;
use crate::types::*;
use crate::utils::*;
use std::collections::HashSet;
@ -124,13 +125,7 @@ impl CommitV0 {
})
}
pub fn save(
&mut self,
block_size: usize,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl BlockStorage + ?Sized>,
) -> Result<ObjectRef, StorageError> {
pub fn save(&mut self, block_size: usize, store: &Store) -> Result<ObjectRef, StorageError> {
if self.id.is_some() && self.key.is_some() {
return Ok(ObjectRef::from_id_key(
self.id.unwrap(),
@ -142,8 +137,7 @@ impl CommitV0 {
ObjectContent::V0(ObjectContentV0::Commit(Commit::V0(self.clone()))),
self.header.clone(),
block_size,
store_pubkey,
store_secret,
store,
);
self.blocks = obj.save(store)?;
if let Some(h) = &mut self.header {
@ -222,9 +216,7 @@ impl Commit {
deps: Vec<ObjectRef>,
acks: Vec<ObjectRef>,
body: CommitBody,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
storage: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> Result<Commit, NgError> {
Self::new_with_body_and_save(
author_privkey,
@ -240,9 +232,7 @@ impl Commit {
vec![],
body,
0,
store_pubkey,
store_secret,
storage,
store,
)
}
@ -261,14 +251,10 @@ impl Commit {
metadata: Vec<u8>,
body: CommitBody,
block_size: usize,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
storage: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> Result<Commit, NgError> {
let (body_ref, mut saved_body) =
body.clone()
.save(block_size, store_pubkey, store_secret, storage)?;
let overlay = store_pubkey.overlay_id_for_read_purpose();
let (body_ref, mut saved_body) = body.clone().save(block_size, store)?;
let overlay = store.get_store_repo().overlay_id_for_read_purpose();
let mut commit_v0 = CommitV0::new(
author_privkey,
author_pubkey,
@ -285,7 +271,7 @@ impl Commit {
body_ref,
)?;
commit_v0.body.set(body).unwrap();
let _commit_ref = commit_v0.save(block_size, store_pubkey, store_secret, storage)?;
let _commit_ref = commit_v0.save(block_size, store)?;
commit_v0.blocks.append(&mut saved_body);
Ok(Commit::V0(commit_v0))
@ -302,15 +288,9 @@ impl Commit {
}
}
pub fn save(
&mut self,
block_size: usize,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl BlockStorage + ?Sized>,
) -> Result<ObjectRef, StorageError> {
pub fn save(&mut self, block_size: usize, store: &Store) -> Result<ObjectRef, StorageError> {
match self {
Commit::V0(v0) => v0.save(block_size, store_pubkey, store_secret, store),
Commit::V0(v0) => v0.save(block_size, store),
}
}
@ -323,7 +303,7 @@ impl Commit {
/// Load commit from store
pub fn load(
commit_ref: ObjectRef,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
with_body: bool,
) -> Result<Commit, CommitLoadError> {
let (id, key) = (commit_ref.id, commit_ref.key);
@ -354,10 +334,7 @@ impl Commit {
}
/// Load commit body from store
pub fn load_body(
&self,
store: &Box<impl BlockStorage + ?Sized>,
) -> Result<&CommitBody, CommitLoadError> {
pub fn load_body(&self, store: &Store) -> Result<&CommitBody, CommitLoadError> {
if self.body().is_some() {
return Ok(self.body().unwrap());
}
@ -455,10 +432,7 @@ impl Commit {
}
}
pub fn owners_signature_required(
&self,
store: &Box<impl BlockStorage + ?Sized>,
) -> Result<bool, CommitLoadError> {
pub fn owners_signature_required(&self, store: &Store) -> Result<bool, CommitLoadError> {
match self.load_body(store)? {
CommitBody::V0(CommitBodyV0::UpdateRootBranch(new_root)) => {
// load deps (the previous RootBranch commit)
@ -635,7 +609,7 @@ impl Commit {
/// or a list of missing blocks
pub fn verify_full_object_refs_of_branch_at_commit(
&self,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> Result<Vec<ObjectId>, CommitLoadError> {
//log_debug!(">> verify_full_object_refs_of_branch_at_commit: #{}", self.seq());
@ -643,7 +617,7 @@ impl Commit {
/// and collect missing `ObjectId`s
fn load_direct_object_refs(
commit: &Commit,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
visited: &mut HashSet<ObjectId>,
missing: &mut HashSet<ObjectId>,
) -> Result<(), CommitLoadError> {
@ -727,7 +701,7 @@ impl Commit {
}
self.verify_sig(repo)?;
self.verify_perm(repo)?;
self.verify_full_object_refs_of_branch_at_commit(repo.get_storage())?;
//self.verify_full_object_refs_of_branch_at_commit(repo.store.unwrap())?;
Ok(())
}
}
@ -770,16 +744,13 @@ impl CommitBody {
pub fn save(
self,
block_size: usize,
store_pubkey: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> Result<(ObjectRef, Vec<BlockId>), StorageError> {
let obj = Object::new(
ObjectContent::V0(ObjectContentV0::CommitBody(self)),
None,
block_size,
store_pubkey,
store_secret,
store,
);
let blocks = obj.save(store)?;
Ok((obj.reference().unwrap(), blocks))
@ -1457,20 +1428,20 @@ mod test {
use crate::commit::*;
use crate::log::*;
struct Test<'a> {
storage: Box<dyn BlockStorage + Send + Sync + 'a>,
}
// struct Test<'a> {
// storage: Box<dyn BlockStorage + Send + Sync + 'a>,
// }
impl<'a> Test<'a> {
fn storage(s: impl BlockStorage + 'a) -> Self {
Test {
storage: Box::new(s),
}
}
fn s(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
&self.storage
}
}
// impl<'a> Test<'a> {
// fn storage(s: impl BlockStorage + 'a) -> Self {
// Test {
// storage: Box::new(s),
// }
// }
// fn s(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
// &self.storage
// }
// }
fn test_commit_header_ref_content_fits(
obj_refs: Vec<BlockRef>,
@ -1510,20 +1481,13 @@ mod test {
let max_object_size = 0;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let hashmap_storage = HashMapBlockStorage::new();
let storage = Box::new(hashmap_storage);
let store = Store::dummy_public_v0();
let commit_ref = commit
.save(max_object_size, &store_repo, &store_secret, &storage)
.expect("save commit");
let commit_ref = commit.save(max_object_size, &store).expect("save commit");
let commit_object = Object::load(
commit_ref.id.clone(),
Some(commit_ref.key.clone()),
&storage,
)
.expect("load object from storage");
let commit_object =
Object::load(commit_ref.id.clone(), Some(commit_ref.key.clone()), &store)
.expect("load object from storage");
assert_eq!(
commit_object.acks(),
@ -1536,7 +1500,7 @@ mod test {
assert_eq!(commit_object.all_blocks_len(), expect_blocks_len);
let commit = Commit::load(commit_ref, &storage, false).expect("load commit from storage");
let commit = Commit::load(commit_ref, &store, false).expect("load commit from storage");
log_debug!("{}", commit);
}
@ -1569,22 +1533,16 @@ mod test {
let max_object_size = 0;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
let obj = Object::new(
content.clone(),
None,
max_object_size,
&store_repo,
&store_secret,
);
let obj = Object::new(content.clone(), None, max_object_size, &store);
let hashmap_storage = HashMapBlockStorage::new();
let storage = Box::new(hashmap_storage);
_ = obj.save(&storage).expect("save object");
_ = obj.save(&store).expect("save object");
let commit = Commit::load(obj.reference().unwrap(), &storage, false);
let commit = Commit::load(obj.reference().unwrap(), &store, false);
assert_eq!(commit, Err(CommitLoadError::NotACommitError));
}
@ -1611,9 +1569,7 @@ mod test {
let max_object_size = 0;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let hashmap_storage = HashMapBlockStorage::new();
let storage = Box::new(hashmap_storage);
let store = Store::dummy_public_v0();
let commit = Commit::new_with_body_and_save(
&priv_key,
@ -1629,15 +1585,13 @@ mod test {
metadata,
body,
max_object_size,
&store_repo,
&store_secret,
&storage,
&store,
)
.expect("commit::new_with_body_and_save");
log_debug!("{}", commit);
let commit2 = Commit::load(commit.reference().unwrap(), &storage, true)
let commit2 = Commit::load(commit.reference().unwrap(), &store, true)
.expect("load commit with body after save");
log_debug!("{}", commit2);
@ -1676,19 +1630,16 @@ mod test {
.unwrap();
log_debug!("{}", commit);
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let store = Store::dummy_public_v0();
let repo = Repo::new_with_perms(&[PermissionV0::Create], store);
let repo =
Repo::new_with_member(&pub_key, &pub_key, &[PermissionV0::Create], overlay, t.s());
match commit.load_body(repo.get_storage()) {
Ok(_b) => panic!("Body should not exist"),
Err(CommitLoadError::BodyLoadError(missing)) => {
assert_eq!(missing.len(), 1);
}
Err(e) => panic!("Commit load error: {:?}", e),
}
// match commit.load_body(repo.store.unwrap()) {
// Ok(_b) => panic!("Body should not exist"),
// Err(CommitLoadError::BodyLoadError(missing)) => {
// assert_eq!(missing.len(), 1);
// }
// Err(e) => panic!("Commit load error: {:?}", e),
// }
commit.verify_sig(&repo).expect("verify signature");
match commit.verify_perm(&repo) {
@ -1699,13 +1650,13 @@ mod test {
Err(e) => panic!("Commit verify perm error: {:?}", e),
}
match commit.verify_full_object_refs_of_branch_at_commit(repo.get_storage()) {
Ok(_) => panic!("Commit should not be Ok"),
Err(CommitLoadError::BodyLoadError(missing)) => {
assert_eq!(missing.len(), 1);
}
Err(e) => panic!("Commit verify error: {:?}", e),
}
// match commit.verify_full_object_refs_of_branch_at_commit(repo.store.unwrap()) {
// Ok(_) => panic!("Commit should not be Ok"),
// Err(CommitLoadError::BodyLoadError(missing)) => {
// assert_eq!(missing.len(), 1);
// }
// Err(e) => panic!("Commit verify error: {:?}", e),
// }
match commit.verify(&repo) {
Ok(_) => panic!("Commit should not be Ok"),
@ -1734,9 +1685,7 @@ mod test {
let max_object_size = 0;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let store = Store::dummy_public_v0();
let commit = Commit::new_with_body_and_save(
&priv_key,
@ -1752,23 +1701,15 @@ mod test {
metadata,
body,
max_object_size,
&store_repo,
&store_secret,
t.s(),
&store,
)
.expect("commit::new_with_body_and_save");
log_debug!("{}", commit);
let repo = Repo::new_with_member(
&pub_key,
&pub_key,
&[PermissionV0::Create],
store_repo.overlay_id_for_read_purpose(),
t.s(),
);
let repo = Repo::new_with_perms(&[PermissionV0::Create], store);
commit.load_body(repo.get_storage()).expect("load body");
commit.load_body(&repo.store).expect("load body");
commit.verify_sig(&repo).expect("verify signature");
commit.verify_perm(&repo).expect("verify perms");
@ -1777,7 +1718,7 @@ mod test {
.expect("verify_perm_creation");
commit
.verify_full_object_refs_of_branch_at_commit(repo.get_storage())
.verify_full_object_refs_of_branch_at_commit(&repo.store)
.expect("verify is at root of branch and singleton");
commit.verify(&repo).expect("verify");
@ -1792,7 +1733,7 @@ mod test {
let metadata = Vec::from("some metadata");
//let max_object_size = 0;
//let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
//let store = Store::dummy_public_v0();
let commit = Commit::V0(
CommitV0::new_with_invalid_header(
@ -1808,16 +1749,8 @@ mod test {
log_debug!("{}", commit);
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let repo = Repo::new_with_member(
&pub_key,
&pub_key,
&[PermissionV0::Create],
OverlayId::dummy(),
t.s(),
);
let store = Store::dummy_public_v0();
let repo = Repo::new_with_perms(&[PermissionV0::Create], store);
assert_eq!(
commit.verify(&repo),

ng-repo/src/errors.rs: 1 change
@ -159,6 +159,7 @@ pub enum StorageError {
SerializationError,
AlreadyExists,
DataCorruption,
UnknownColumnFamily,
}
impl core::fmt::Display for StorageError {

ng-repo/src/event.rs: 23 changes
@ -11,6 +11,7 @@
use crate::block_storage::*;
use crate::errors::*;
use crate::object::*;
use crate::store::Store;
use crate::types::*;
use crate::utils::*;
use core::fmt;
@ -55,13 +56,12 @@ impl fmt::Display for EventContentV0 {
impl Event {
pub fn new<'a>(
publisher: &PrivKey,
seq: &mut u64,
seq: u64,
commit: &Commit,
additional_blocks: &Vec<BlockId>,
topic_id: TopicId,
branch_read_cap_secret: ReadCapSecret,
topic_priv_key: &BranchWriteCapSecret,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &'a Store,
) -> Result<Event, NgError> {
Ok(Event::V0(EventV0::new(
publisher,
@ -69,9 +69,8 @@ impl Event {
commit,
additional_blocks,
topic_id,
branch_read_cap_secret,
topic_priv_key,
storage,
store,
)?))
}
}
@ -79,27 +78,27 @@ impl Event {
impl EventV0 {
pub fn new<'a>(
publisher: &PrivKey,
seq: &mut u64,
seq: u64,
commit: &Commit,
additional_blocks: &Vec<BlockId>,
topic_id: TopicId,
branch_read_cap_secret: ReadCapSecret,
topic_priv_key: &BranchWriteCapSecret,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &'a Store,
) -> Result<EventV0, NgError> {
let branch_read_cap_secret = &store.get_store_readcap().key;
let mut blocks = vec![];
for bid in commit.blocks().iter() {
blocks.push(storage.get(bid)?);
blocks.push(store.get(bid)?);
}
for bid in additional_blocks.iter() {
blocks.push(storage.get(bid)?);
blocks.push(store.get(bid)?);
}
(*seq) += 1;
// (*seq) += 1;
let publisher_pubkey = publisher.to_pub();
let event_content = EventContentV0 {
topic: topic_id,
publisher: PeerId::Forwarded(publisher_pubkey),
seq: *seq,
seq,
blocks,
file_ids: commit
.header()

ng-repo/src/file.rs: 287 changes
@ -21,6 +21,7 @@ use crate::block_storage::*;
use crate::errors::*;
use crate::log::*;
use crate::object::*;
use crate::store::Store;
use crate::types::*;
/// File errors
@ -80,21 +81,17 @@ pub struct File<'a> {
}
impl<'a> File<'a> {
pub fn open(
id: ObjectId,
key: SymKey,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
) -> Result<File<'a>, FileError> {
let root_block = storage.get(&id)?;
pub fn open(id: ObjectId, key: SymKey, store: &'a Store) -> Result<File<'a>, FileError> {
let root_block = store.get(&id)?;
if root_block.children().len() == 2
&& *root_block.content().commit_header_obj() == CommitHeaderObject::RandomAccess
{
Ok(File {
internal: Box::new(RandomAccessFile::open(id, key, storage)?),
internal: Box::new(RandomAccessFile::open(id, key, store)?),
})
} else {
let obj = Object::load(id, Some(key), storage)?;
let obj = Object::load(id, Some(key), store)?;
match obj.content_v0()? {
ObjectContentV0::SmallFile(small_file) => Ok(File {
internal: Box::new(small_file),
@ -134,7 +131,7 @@ impl ReadFile for SmallFileV0 {
/// A RandomAccessFile in memory. This is not used to serialize data
pub struct RandomAccessFile<'a> {
//storage: Arc<&'a dyn BlockStorage>,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &'a Store,
/// accurate once saved or opened
meta: RandomAccessFileMeta,
@ -177,7 +174,7 @@ impl<'a> ReadFile for RandomAccessFile<'a> {
let mut level_pos = pos;
for level in 0..depth {
let tree_block = self.storage.get(&current_block_id_key.0)?;
let tree_block = self.store.get(&current_block_id_key.0)?;
let (children, content) = tree_block.read(&current_block_id_key.1)?;
if children.len() == 0 || content.len() > 0 {
return Err(FileError::BlockDeserializeError);
@ -192,7 +189,7 @@ impl<'a> ReadFile for RandomAccessFile<'a> {
level_pos = pos as usize % factor;
}
let content_block = self.storage.get(&current_block_id_key.0)?;
let content_block = self.store.get(&current_block_id_key.0)?;
//log_debug!("CONTENT BLOCK SIZE {}", content_block.size());
let (children, content) = content_block.read(&current_block_id_key.1)?;
@ -228,7 +225,7 @@ impl<'a> ReadFile for RandomAccessFile<'a> {
return Err(FileError::EndOfFile);
}
let block = &self.blocks[index];
let content_block = self.storage.get(&block.0)?;
let content_block = self.store.get(&block.0)?;
let (children, content) = content_block.read(&block.1)?;
if children.len() == 0 && content.len() > 0 {
//log_debug!("CONTENT SIZE {}", content.len());
@ -263,7 +260,7 @@ impl<'a> RandomAccessFile<'a> {
conv_key: &[u8; blake3::OUT_LEN],
children: Vec<ObjectId>,
already_existing: &mut HashMap<BlockKey, BlockId>,
storage: &Box<dyn BlockStorage + Send + Sync + 'a>,
store: &Store,
) -> Result<(BlockId, BlockKey), StorageError> {
let key_hash = blake3::keyed_hash(conv_key, &content);
@ -286,7 +283,7 @@ impl<'a> RandomAccessFile<'a> {
let id = block.get_and_save_id();
already_existing.insert(key.clone(), id);
//log_debug!("putting *** {}", id);
storage.put(&block)?;
store.put(&block)?;
Ok((id, key))
}
@ -294,7 +291,7 @@ impl<'a> RandomAccessFile<'a> {
conv_key: &[u8; blake3::OUT_LEN],
children: Vec<(BlockId, BlockKey)>,
already_existing: &mut HashMap<BlockKey, BlockId>,
storage: &Box<dyn BlockStorage + Send + Sync + 'a>,
store: &Store,
) -> Result<(BlockId, BlockKey), StorageError> {
let mut ids: Vec<BlockId> = Vec::with_capacity(children.len());
let mut keys: Vec<BlockKey> = Vec::with_capacity(children.len());
@ -305,7 +302,7 @@ impl<'a> RandomAccessFile<'a> {
let content = ChunkContentV0::InternalNode(keys);
let content_ser = serde_bare::to_vec(&content).unwrap();
Self::make_block(content_ser, conv_key, ids, already_existing, storage)
Self::make_block(content_ser, conv_key, ids, already_existing, store)
}
/// Build tree from leaves, returns parent nodes
@ -314,7 +311,7 @@ impl<'a> RandomAccessFile<'a> {
leaves: &[(BlockId, BlockKey)],
conv_key: &ChaCha20Key,
arity: u16,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &Store,
) -> Result<(BlockId, BlockKey), StorageError> {
let mut parents: Vec<(BlockId, BlockKey)> = vec![];
let mut chunks = leaves.chunks(arity as usize);
@ -324,19 +321,13 @@ impl<'a> RandomAccessFile<'a> {
conv_key,
nodes.to_vec(),
already_existing,
storage,
store,
)?);
}
//log_debug!("level with {} parents", parents.len());
if 1 < parents.len() {
return Self::make_tree(
already_existing,
parents.as_slice(),
conv_key,
arity,
storage,
);
return Self::make_tree(already_existing, parents.as_slice(), conv_key, arity, store);
}
Ok(parents[0].clone())
}
@ -347,7 +338,7 @@ impl<'a> RandomAccessFile<'a> {
blocks: &[(BlockId, BlockKey)],
meta: &mut RandomAccessFileMeta,
conv_key: &ChaCha20Key,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &Store,
) -> Result<((BlockId, BlockKey), (BlockId, BlockKey)), FileError> {
let leaf_blocks_nbr = blocks.len();
let arity = meta.arity();
@ -370,7 +361,7 @@ impl<'a> RandomAccessFile<'a> {
blocks[0].clone()
} else {
// we create the tree
Self::make_tree(already_existing, &blocks, &conv_key, arity, storage)?
Self::make_tree(already_existing, &blocks, &conv_key, arity, store)?
};
let meta_object = Object::new_with_convergence_key(
@ -380,7 +371,7 @@ impl<'a> RandomAccessFile<'a> {
conv_key,
);
//log_debug!("saving meta object");
_ = meta_object.save(storage)?;
_ = meta_object.save(store)?;
// creating the root block that contains as first child the meta_object, and as second child the content_block
// it is added to storage in make_parent_block
@ -392,21 +383,20 @@ impl<'a> RandomAccessFile<'a> {
content_block.clone(),
],
already_existing,
storage,
store,
)?;
Ok((content_block, root_block))
}
/// Creates a new file based on content that is fully known at the time of creation.
///
/// If you want to stream the content into the new file progressively, use new_empty(), write() and save() instead.
pub fn new_from_slice(
content: &[u8],
block_size: usize,
content_type: String,
metadata: Vec<u8>,
store: &StoreRepo,
store_secret: &ReadCapSecret,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &'a Store,
) -> Result<RandomAccessFile<'a>, FileError> {
//let max_block_size = store_max_value_size();
let valid_block_size = store_valid_value_size(block_size) - BLOCK_EXTRA;
@ -415,7 +405,7 @@ impl<'a> RandomAccessFile<'a> {
let total_size = content.len() as u64;
let mut conv_key = Object::convergence_key(store, store_secret);
let mut conv_key = Object::convergence_key(store);
let mut blocks: Vec<(BlockId, BlockKey)> = vec![];
@ -430,7 +420,7 @@ impl<'a> RandomAccessFile<'a> {
&conv_key,
vec![],
&mut already_existing,
storage,
store,
)?);
}
assert_eq!(
@ -447,18 +437,13 @@ impl<'a> RandomAccessFile<'a> {
depth: 0,
});
let (content_block, root_block) = Self::save_(
&mut already_existing,
&blocks,
&mut meta,
&conv_key,
storage,
)?;
let (content_block, root_block) =
Self::save_(&mut already_existing, &blocks, &mut meta, &conv_key, store)?;
conv_key.zeroize();
Ok(Self {
storage,
store,
meta,
block_contents: HashMap::new(), // not used in this case
blocks: vec![], // not used in this case
@ -475,9 +460,7 @@ impl<'a> RandomAccessFile<'a> {
block_size: usize,
content_type: String,
metadata: Vec<u8>,
store: &StoreRepo,
store_secret: &ReadCapSecret,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &'a Store,
) -> Self {
let valid_block_size = store_valid_value_size(block_size) - BLOCK_EXTRA;
@ -493,14 +476,14 @@ impl<'a> RandomAccessFile<'a> {
});
Self {
storage,
store,
meta,
block_contents: HashMap::new(),
blocks: vec![],
id: None,
key: None,
content_block: None,
conv_key: Some(Object::convergence_key(store, store_secret)),
conv_key: Some(Object::convergence_key(store)),
remainder: vec![],
size: 0,
}
@ -535,7 +518,7 @@ impl<'a> RandomAccessFile<'a> {
&conv_key,
vec![],
&mut already_existing,
self.storage,
self.store,
)?);
} else {
// not enough data to create a new block
@ -558,7 +541,7 @@ impl<'a> RandomAccessFile<'a> {
&conv_key,
vec![],
&mut already_existing,
self.storage,
self.store,
)?);
} else {
self.remainder = Vec::from(chunck);
@ -585,7 +568,7 @@ impl<'a> RandomAccessFile<'a> {
&self.conv_key.unwrap(),
vec![],
&mut HashMap::new(),
self.storage,
self.store,
)?);
}
@ -597,7 +580,7 @@ impl<'a> RandomAccessFile<'a> {
&self.blocks,
&mut self.meta,
self.conv_key.as_ref().unwrap(),
self.storage,
self.store,
)?;
self.conv_key.as_mut().unwrap().zeroize();
@ -617,10 +600,10 @@ impl<'a> RandomAccessFile<'a> {
pub fn open(
id: ObjectId,
key: SymKey,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: &'a Store,
) -> Result<RandomAccessFile<'a>, FileError> {
// load root block
let root_block = storage.get(&id)?;
let root_block = store.get(&id)?;
if root_block.children().len() != 2
|| *root_block.content().commit_header_obj() != CommitHeaderObject::RandomAccess
@ -634,7 +617,7 @@ impl<'a> RandomAccessFile<'a> {
let meta_object = Object::load(
root_sub_blocks[0].0,
Some(root_sub_blocks[0].1.clone()),
storage,
store,
)?;
let meta = match meta_object.content_v0()? {
@ -643,7 +626,7 @@ impl<'a> RandomAccessFile<'a> {
};
Ok(RandomAccessFile {
storage,
store,
meta,
block_contents: HashMap::new(), // not used in this case
blocks: vec![], // not used in this case
@ -659,7 +642,7 @@ impl<'a> RandomAccessFile<'a> {
pub fn blocks(&self) -> impl Iterator<Item = Block> + '_ {
self.blocks
.iter()
.map(|key| self.storage.get(&key.0).unwrap())
.map(|key| self.store.get(&key.0).unwrap())
}
/// Size once encoded, before deduplication. Only available before save()
@ -674,7 +657,7 @@ impl<'a> RandomAccessFile<'a> {
let mut total = 0;
self.block_contents
.values()
.for_each(|b| total += self.storage.get(b).unwrap().size());
.for_each(|b| total += self.store.get(b).unwrap().size());
total
}
@ -734,20 +717,20 @@ mod test {
use std::io::BufReader;
use std::io::Read;
struct Test<'a> {
storage: Box<dyn BlockStorage + Send + Sync + 'a>,
}
impl<'a> Test<'a> {
fn storage(s: impl BlockStorage + 'a) -> Self {
Test {
storage: Box::new(s),
}
}
fn s(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
&self.storage
}
}
// struct Test<'a> {
// storage: Box<dyn BlockStorage + Send + Sync + 'a>,
// }
// impl<'a> Test<'a> {
// fn storage(s: impl BlockStorage + 'a) -> Self {
// Test {
// storage: Box::new(s),
// }
// }
// fn s(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
// &self.store
// }
// }
/// Checks that content that fits in one block creates an arity of 0
#[test]
@ -755,15 +738,10 @@ mod test {
let block_size = store_max_value_size();
//store_valid_value_size(0)
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
//let storage: Arc<&dyn BlockStorage> = Arc::new(&hashmap_storage);
////// 1 MB of data!
let data_size = block_size - BLOCK_EXTRA;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 1MB of data");
let content: Vec<u8> = vec![99; data_size];
@ -773,9 +751,7 @@ mod test {
block_size,
"text/plain".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -818,9 +794,9 @@ mod test {
// MAX_ARITY_LEAVES * (MAX_ARITY_LEAVES + 1) * MAX_ARITY_LEAVES + MAX_ARITY_LEAVES + 1
// );
assert_eq!(file.depth(), Ok(0));
assert_eq!(t.s().len(), Ok(3));
assert_eq!(store.len(), Ok(3));
let file = RandomAccessFile::open(id, file.key.unwrap(), t.s()).expect("re open");
let file = RandomAccessFile::open(id, file.key.unwrap(), &store).expect("re open");
log_debug!("{}", file);
@ -834,13 +810,10 @@ mod test {
const MAX_ARITY_LEAVES: usize = 15887;
const MAX_DATA_PAYLOAD_SIZE: usize = 1048564;
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
////// 16 GB of data!
let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 16GB of data");
let content: Vec<u8> = vec![99; data_size];
@ -851,9 +824,7 @@ mod test {
store_max_value_size(),
"text/plain".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -864,7 +835,7 @@ mod test {
assert_eq!(file.depth(), Ok(1));
assert_eq!(t.s().len(), Ok(4));
assert_eq!(store.len(), Ok(4));
}
/// Checks that content that doesn't fit in all the children of the first level of the tree
@ -873,13 +844,10 @@ mod test {
const MAX_ARITY_LEAVES: usize = 15887;
const MAX_DATA_PAYLOAD_SIZE: usize = 1048564;
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
////// 16 GB of data!
let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE + 1;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 16GB of data");
let content: Vec<u8> = vec![99; data_size];
@ -889,9 +857,7 @@ mod test {
store_max_value_size(),
"text/plain".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -903,7 +869,7 @@ mod test {
assert_eq!(file.depth().unwrap(), 2);
assert_eq!(t.s().len(), Ok(7));
assert_eq!(store.len(), Ok(7));
}
/// Checks that content that doesn't fit in all the children of the first level of the tree
@ -911,14 +877,12 @@ mod test {
pub fn test_depth_3() {
const MAX_ARITY_LEAVES: usize = 61;
const MAX_DATA_PAYLOAD_SIZE: usize = 4084;
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
////// 900 MB of data!
let data_size =
MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 900MB of data");
let content: Vec<u8> = vec![99; data_size];
@ -928,9 +892,7 @@ mod test {
store_valid_value_size(0),
"text/plain".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -965,7 +927,7 @@ mod test {
// );
assert_eq!(file.depth().unwrap(), 3);
assert_eq!(t.s().len(), Ok(6));
assert_eq!(store.len(), Ok(6));
}
/// Checks that content that doesn't fit in all the children of the first level of the tree
@ -981,10 +943,7 @@ mod test {
* MAX_ARITY_LEAVES
* MAX_DATA_PAYLOAD_SIZE;
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 55GB of data");
let content: Vec<u8> = vec![99; data_size];
@ -994,9 +953,7 @@ mod test {
store_valid_value_size(0),
"text/plain".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
)
.expect("new_from_slice");
@ -1009,7 +966,7 @@ mod test {
assert_eq!(file.depth().unwrap(), 4);
assert_eq!(t.s().len(), Ok(7));
assert_eq!(store.len(), Ok(7));
}
/// Test async write to a file all at once
@ -1022,19 +979,14 @@ mod test {
.read_to_end(&mut img_buffer)
.expect("read of test.jpg");
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating file with the JPG content");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_max_value_size(), //store_valid_value_size(0),//
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);
@ -1097,19 +1049,14 @@ mod test {
.read_to_end(&mut img_buffer)
.expect("read of test.jpg");
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating file with the JPG content");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_max_value_size(), //store_valid_value_size(0),//
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);
@ -1174,19 +1121,14 @@ mod test {
.read_to_end(&mut img_buffer)
.expect("read of test.jpg");
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating file with the JPG content");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);
@ -1259,19 +1201,14 @@ mod test {
let first_block_content = img_buffer[0..4084].to_vec();
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating file with the JPG content");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);
@ -1342,19 +1279,14 @@ mod test {
let chunk_nbr = data_size / 5000000;
let last_chunk = data_size % 5000000;
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating empty file");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);
@ -1383,7 +1315,7 @@ mod test {
assert_eq!(file.depth().unwrap(), 4);
assert_eq!(t.s().len(), Ok(7));
assert_eq!(store.len(), Ok(7));
}
/// Test open
@ -1396,19 +1328,14 @@ mod test {
.read_to_end(&mut img_buffer)
.expect("read of test.jpg");
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating file with the JPG content");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_max_value_size(), //store_valid_value_size(0),//
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);
@ -1419,7 +1346,7 @@ mod test {
file.save().expect("save");
let file2 = RandomAccessFile::open(file.id().unwrap(), file.key.unwrap(), t.s())
let file2 = RandomAccessFile::open(file.id().unwrap(), file.key.unwrap(), &store)
.expect("reopen file");
// This works only because store_max_value_size() is bigger than the actual size of the JPEG file, so it fits in one block.
@ -1459,17 +1386,14 @@ mod test {
let content = ObjectContent::new_file_v0_with_content(img_buffer.clone(), "image/jpeg");
let max_object_size = store_max_value_size();
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let mut obj = Object::new(content, None, max_object_size, &store_repo, &store_secret);
let store = Store::dummy_public_v0();
let mut obj = Object::new(content, None, max_object_size, &store);
log_debug!("{}", obj);
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let _ = obj.save_in_test(&store).expect("save");
let _ = obj.save_in_test(t.s()).expect("save");
let file = File::open(obj.id(), obj.key().unwrap(), t.s()).expect("open");
let file = File::open(obj.id(), obj.key().unwrap(), &store).expect("open");
let res = file.read(0, len).expect("read all");
@ -1488,20 +1412,11 @@ mod test {
let len = img_buffer.len();
let max_object_size = store_max_value_size();
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let store = Store::dummy_public_v0();
log_debug!("creating empty file");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
max_object_size,
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
);
let mut file: RandomAccessFile =
RandomAccessFile::new_empty(max_object_size, "image/jpeg".to_string(), vec![], &store);
file.write(&img_buffer).expect("write all");
@ -1514,7 +1429,7 @@ mod test {
let file = File::open(
file.id().unwrap(),
file.key().as_ref().unwrap().clone(),
t.s(),
&store,
)
.expect("open");
@ -1533,19 +1448,14 @@ mod test {
let f = std::fs::File::open("[enter path of a big file here]").expect("open of a big file");
let mut reader = BufReader::new(f);
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating empty file");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);
@ -1587,19 +1497,14 @@ mod test {
let f = std::fs::File::open("[enter path of a big file here]").expect("open of a big file");
let mut reader = BufReader::new(f);
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating empty file");
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
store_max_value_size(),
"image/jpeg".to_string(),
vec![],
&store_repo,
&store_secret,
t.s(),
&store,
);
log_debug!("{}", file);

@ -8,6 +8,8 @@
//! KeyColumnValue Store abstraction
use std::collections::HashMap;
use crate::errors::StorageError;
// TODO: remove mut on self for trait WriteTransaction methods
@ -15,46 +17,62 @@ use crate::errors::StorageError;
pub trait WriteTransaction: ReadTransaction {
/// Save a property value to the store.
fn put(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError>;
/// Replace the property of a key (single value) in the store.
fn replace(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError>;
/// Delete a property from the store.
fn del(&mut self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError>;
fn del(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<(), StorageError>;
/// Delete all properties of a key from the store.
fn del_all(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
all_suffixes: &[u8],
family: &Option<String>,
) -> Result<(), StorageError>;
/// Delete a specific value for a property from the store.
fn del_property_value(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError>;
}
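A minimal sketch of writing a single-value property through this trait, assuming an implementor behind `tx`; the prefix byte is hypothetical, and passing None for the family selects the default column family:

fn store_name(tx: &dyn WriteTransaction, key: &Vec<u8>, name: &str) -> Result<(), StorageError> {
    const NAME_PREFIX: u8 = b'n'; // hypothetical prefix byte for a "name" property
    // suffix None addresses the single-value property itself
    tx.put(NAME_PREFIX, key, None, &name.as_bytes().to_vec(), &None)
}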
pub trait ReadTransaction {
/// Load a property from the store.
fn get(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<Vec<u8>, StorageError>;
fn get(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<u8>, StorageError>;
/// Load all the values of a property from the store.
#[deprecated(
@ -65,8 +83,17 @@ pub trait ReadTransaction {
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<Vec<u8>>, StorageError>;
fn get_all_properties_of_key(
&self,
prefix: u8,
key: Vec<u8>,
properties: Vec<u8>,
family: &Option<String>,
) -> Result<HashMap<u8, Vec<u8>>, StorageError>;
/// Check if a specific value exists for a property from the store.
fn has_property_value(
&self,
@ -74,6 +101,7 @@ pub trait ReadTransaction {
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError>;
/// Retrieves all the keys and values with the given prefix and key_size. If no suffix is specified, then all suffixes (including none) are returned
@ -83,45 +111,46 @@ pub trait ReadTransaction {
key_size: usize,
key_prefix: Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, StorageError>;
}
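The matching read-side sketch, under the same hypothetical prefix byte:

fn load_name(tx: &dyn ReadTransaction, key: &Vec<u8>) -> Result<Vec<u8>, StorageError> {
    // suffix and family mirror the write above
    tx.get(b'n', key, None, &None)
}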
pub trait KCVStore: ReadTransaction {
pub trait KCVStore: WriteTransaction {
fn write_transaction(
&self,
method: &mut dyn FnMut(&mut dyn WriteTransaction) -> Result<(), StorageError>,
) -> Result<(), StorageError>;
/// Save a property value to the store.
fn put(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError>;
/// Replace the property of a key (single value) to the store.
fn replace(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError>;
/// Delete a property from the store.
fn del(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError>;
/// Delete all properties of a key from the store.
fn del_all(&self, prefix: u8, key: &Vec<u8>, all_suffixes: &[u8]) -> Result<(), StorageError>;
/// Delete a specific value for a property from the store.
fn del_property_value(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError>;
// /// Save a property value to the store.
// fn put(
// &self,
// prefix: u8,
// key: &Vec<u8>,
// suffix: Option<u8>,
// value: Vec<u8>,
// ) -> Result<(), StorageError>;
// /// Replace the property of a key (single value) to the store.
// fn replace(
// &self,
// prefix: u8,
// key: &Vec<u8>,
// suffix: Option<u8>,
// value: Vec<u8>,
// ) -> Result<(), StorageError>;
// /// Delete a property from the store.
// fn del(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError>;
// /// Delete all properties of a key from the store.
// fn del_all(&self, prefix: u8, key: &Vec<u8>, all_suffixes: &[u8]) -> Result<(), StorageError>;
// /// Delete a specific value for a property from the store.
// fn del_property_value(
// &self,
// prefix: u8,
// key: &Vec<u8>,
// suffix: Option<u8>,
// value: Vec<u8>,
// ) -> Result<(), StorageError>;
}
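Since KCVStore now inherits the WriteTransaction methods, single writes can be issued directly on the store, while multi-step writes are grouped through write_transaction so that they commit or roll back together. A minimal sketch, with hypothetical prefix bytes:

fn rename(store: &dyn KCVStore, key: &Vec<u8>, new_name: Vec<u8>) -> Result<(), StorageError> {
    store.write_transaction(&mut |tx: &mut dyn WriteTransaction| {
        tx.del(b'o', key, None, &None)?; // drop the old property
        tx.put(b'n', key, None, &new_name, &None) // atomic with the delete above
    })
}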

@ -24,6 +24,8 @@ pub mod repo;
pub mod site;
pub mod store;
pub mod event;
pub mod utils;

@ -21,6 +21,7 @@ use zeroize::Zeroize;
use crate::block_storage::*;
use crate::errors::*;
use crate::log::*;
use crate::store::Store;
use crate::types::*;
pub const BLOCK_EXTRA: usize = 12; // 8 is the smallest extra + BLOCK_MAX_DATA_EXTRA
@ -60,10 +61,14 @@ pub struct Object {
impl Object {
pub(crate) fn convergence_key(
store_pubkey: &StoreRepo,
store_readcap_secret: &ReadCapSecret,
/*store_pubkey: &StoreRepo,
store_readcap_secret: &ReadCapSecret,*/
store: &Store,
) -> [u8; blake3::OUT_LEN] {
let mut key_material = match (*store_pubkey.repo_id(), store_readcap_secret.clone()) {
let mut key_material = match (
*store.get_store_repo().repo_id(),
store.get_store_readcap_secret().clone(),
) {
(PubKey::Ed25519PubKey(pubkey), SymKey::ChaCha20Key(secret)) => {
[pubkey, secret].concat()
}
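The tail of the function (outside this hunk) reduces key_material to blake3::OUT_LEN bytes; a sketch of that pattern, with the context string being an assumption:

fn derive(mut key_material: Vec<u8>) -> [u8; blake3::OUT_LEN] {
    // the exact context string is hypothetical
    let key = blake3::derive_key("NextGraph convergence key", &key_material);
    key_material.zeroize(); // zeroize::Zeroize is already imported in this module
    key
}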
@ -271,10 +276,9 @@ impl Object {
content: ObjectContent,
header: Option<CommitHeader>,
block_size: usize,
store: &StoreRepo,
store_secret: &ReadCapSecret,
store: &Store,
) -> Object {
let mut conv_key = Self::convergence_key(store, store_secret);
let mut conv_key = Self::convergence_key(store);
let res = Self::new_with_convergence_key(content, header, block_size, &conv_key);
conv_key.zeroize();
res
@ -424,11 +428,11 @@ impl Object {
pub fn load(
id: ObjectId,
key: Option<SymKey>,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
) -> Result<Object, ObjectParseError> {
fn load_tree(
parents: Vec<BlockId>,
store: &Box<impl BlockStorage + ?Sized>,
store: &Store,
blocks: &mut Vec<BlockId>,
missing: &mut Vec<BlockId>,
block_contents: &mut HashMap<BlockId, Block>,
@ -517,10 +521,7 @@ impl Object {
}
/// Save blocks of the object and the blocks of the header object in the store
pub fn save(
&self,
store: &Box<impl BlockStorage + ?Sized>,
) -> Result<Vec<BlockId>, StorageError> {
pub fn save(&self, store: &Store) -> Result<Vec<BlockId>, StorageError> {
let mut deduplicated: HashSet<ObjectId> = HashSet::new();
//.chain(self.header_blocks.iter())
for block_id in self.blocks.iter() {
@ -544,10 +545,7 @@ impl Object {
}
#[cfg(test)]
pub fn save_in_test(
&mut self,
store: &Box<impl BlockStorage + ?Sized>,
) -> Result<Vec<BlockId>, StorageError> {
pub fn save_in_test(&mut self, store: &Store) -> Result<Vec<BlockId>, StorageError> {
assert!(self.already_saved == false);
self.already_saved = true;
@ -992,15 +990,9 @@ mod test {
content: vec![],
});
let content = ObjectContent::V0(ObjectContentV0::SmallFile(file));
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
let header = CommitHeader::new_with_acks([ObjectId::dummy()].to_vec());
let _obj = Object::new(
content,
header,
store_max_value_size(),
&store_repo,
&store_secret,
);
let _obj = Object::new(content, header, store_max_value_size(), &store);
}
/// Test JPEG file
@ -1015,8 +1007,8 @@ mod test {
let content = ObjectContent::new_file_v0_with_content(img_buffer, "image/jpeg");
let max_object_size = store_max_value_size();
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let obj = Object::new(content, None, max_object_size, &store_repo, &store_secret);
let store = Store::dummy_public_v0();
let obj = Object::new(content, None, max_object_size, &store);
log_debug!("{}", obj);
@ -1046,15 +1038,9 @@ mod test {
//let header = CommitHeader::new_with_acks(acks.clone());
let max_object_size = 0;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
let mut obj = Object::new(
content.clone(),
None,
max_object_size,
&store_repo,
&store_secret,
);
let mut obj = Object::new(content.clone(), None, max_object_size, &store);
log_debug!("{}", obj);
@ -1067,7 +1053,6 @@ mod test {
}
Err(e) => panic!("Object parse error: {:?}", e),
}
let store = Box::new(HashMapBlockStorage::new());
obj.save_in_test(&store).expect("Object save error");
@ -1101,7 +1086,7 @@ mod test {
/// Checks that content that fits in the root node will not be chunked into children nodes
#[test]
pub fn test_depth_0() {
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
let empty_file =
ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 {
@ -1168,13 +1153,7 @@ mod test {
// let content_ser = serde_bare::to_vec(&content).unwrap();
// log_debug!("content len for 2*524277: {}", content_ser.len());
let empty_obj = Object::new(
empty_file,
None,
store_max_value_size(),
&store_repo,
&store_secret,
);
let empty_obj = Object::new(empty_file, None, store_max_value_size(), &store);
let empty_file_size = empty_obj.size();
log_debug!("empty file size: {}", empty_file_size);
@ -1191,13 +1170,7 @@ mod test {
let content_ser = serde_bare::to_vec(&content).unwrap();
log_debug!("content len: {}", content_ser.len());
let object = Object::new(
content,
None,
store_max_value_size(),
&store_repo,
&store_secret,
);
let object = Object::new(content, None, store_max_value_size(), &store);
log_debug!("{}", object);
log_debug!("object size: {}", object.size());
@ -1217,7 +1190,7 @@ mod test {
////// 16 GB of data!
let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE - 10;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 16GB of data");
let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 {
content_type: "".into(),
@ -1227,13 +1200,7 @@ mod test {
//let content_ser = serde_bare::to_vec(&content).unwrap();
//log_debug!("content len: {}", content_ser.len());
log_debug!("creating object with that data");
let object = Object::new(
content,
None,
store_max_value_size(),
&store_repo,
&store_secret,
);
let object = Object::new(content, None, store_max_value_size(), &store);
log_debug!("{}", object);
let obj_size = object.size();
@ -1260,7 +1227,7 @@ mod test {
////// 16 GB of data!
let data_size = MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 16GB of data");
let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 {
content_type: "".into(),
@ -1270,13 +1237,7 @@ mod test {
//let content_ser = serde_bare::to_vec(&content).unwrap();
//log_debug!("content len: {}", content_ser.len());
log_debug!("creating object with that data");
let object = Object::new(
content,
None,
store_max_value_size(),
&store_repo,
&store_secret,
);
let object = Object::new(content, None, store_max_value_size(), &store);
log_debug!("{}", object);
let obj_size = object.size();
@ -1304,7 +1265,7 @@ mod test {
let data_size =
MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE - 10;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 900MB of data");
let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 {
content_type: "".into(),
@ -1314,13 +1275,7 @@ mod test {
//let content_ser = serde_bare::to_vec(&content).unwrap();
//log_debug!("content len: {}", content_ser.len());
log_debug!("creating object with that data");
let object = Object::new(
content,
None,
store_valid_value_size(0),
&store_repo,
&store_secret,
);
let object = Object::new(content, None, store_valid_value_size(0), &store);
log_debug!("{}", object);
let obj_size = object.size();
@ -1362,7 +1317,7 @@ mod test {
* MAX_DATA_PAYLOAD_SIZE
- 12;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let store = Store::dummy_public_v0();
log_debug!("creating 52GB of data");
let content = ObjectContent::V0(ObjectContentV0::SmallFile(SmallFile::V0(SmallFileV0 {
content_type: "".into(),
@ -1372,13 +1327,7 @@ mod test {
//let content_ser = serde_bare::to_vec(&content).unwrap();
//log_debug!("content len: {}", content_ser.len());
log_debug!("creating object with that data");
let object = Object::new(
content,
None,
store_valid_value_size(0),
&store_repo,
&store_secret,
);
let object = Object::new(content, None, store_valid_value_size(0), &store);
log_debug!("{}", object);
let obj_size = object.size();

@ -14,17 +14,15 @@ use crate::errors::*;
use crate::event::*;
use crate::log::*;
use crate::object::Object;
use crate::store::Store;
use crate::types::*;
use crate::utils::generate_keypair;
use crate::utils::sign;
use core::fmt;
use rand::prelude::*;
use std::collections::HashMap;
use std::collections::HashSet;
use threshold_crypto::{SecretKeySet, SecretKeyShare};
impl RepositoryV0 {
pub fn new(id: &PubKey, metadata: &Vec<u8>) -> RepositoryV0 {
RepositoryV0 {
@ -76,20 +74,20 @@ impl UserInfo {
}
/// In-memory Repository representation, with helper functions that access the underlying UserStore and keep proxies of the values
pub struct Repo<'a> {
pub struct Repo {
pub id: RepoId,
/// Repo definition
pub repo_def: Repository,
pub signer: Option<SignerCap>,
pub members: HashMap<Digest, UserInfo>,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
pub store: Box<Store>,
}
impl<'a> fmt::Display for Repo<'a> {
impl fmt::Display for Repo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "====== Repo ======")?;
writeln!(f, "====== Repo ====== {}", self.id)?;
write!(f, "== repo_def: {}", self.repo_def)?;
@ -103,364 +101,12 @@ impl<'a> fmt::Display for Repo<'a> {
}
}
impl<'a> Repo<'a> {
/// returns the Repo and the last seq_num of the peer
pub fn new_default(
creator: &UserId,
creator_priv_key: &PrivKey,
publisher_peer: &PrivKey,
peer_last_seq_num: &mut u64,
store_repo: &StoreRepo,
store_secret: &ReadCapSecret,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
) -> Result<(Self, Vec<Event>), NgError> {
let mut events = Vec::with_capacity(6);
// creating the Repository commit
let (repo_priv_key, repo_pub_key) = generate_keypair();
//let overlay = store_repo.overlay_id_for_read_purpose();
let repository = Repository::V0(RepositoryV0 {
id: repo_pub_key,
verification_program: vec![],
creator: None,
metadata: vec![],
});
let repository_commit_body = CommitBody::V0(CommitBodyV0::Repository(repository.clone()));
let repository_commit = Commit::new_with_body_acks_deps_and_save(
&repo_priv_key,
&repo_pub_key,
repo_pub_key,
QuorumType::NoSigning,
vec![],
vec![],
repository_commit_body,
&store_repo,
&store_secret,
storage,
)?;
log_debug!("REPOSITORY COMMIT {}", repository_commit);
let repository_commit_ref = repository_commit.reference().unwrap();
let (topic_priv_key, topic_pub_key) = generate_keypair();
// creating the RootBranch commit, acks to Repository commit
let repo_write_cap_secret = SymKey::random();
let root_branch_commit_body =
CommitBody::V0(CommitBodyV0::RootBranch(RootBranch::V0(RootBranchV0 {
id: repo_pub_key,
repo: repository_commit_ref.clone(),
store: store_repo.into(),
store_sig: None, //TODO: the store signature
topic: topic_pub_key,
topic_privkey: Branch::encrypt_topic_priv_key(
&topic_priv_key,
topic_pub_key,
repo_pub_key,
&repo_write_cap_secret,
),
inherit_perms_users_and_quorum_from_store: None,
quorum: None,
reconciliation_interval: RelTime::None,
owners: vec![creator.clone()],
metadata: vec![],
})));
let root_branch_commit = Commit::new_with_body_acks_deps_and_save(
&repo_priv_key,
&repo_pub_key,
repo_pub_key,
QuorumType::NoSigning,
vec![],
vec![repository_commit_ref.clone()],
root_branch_commit_body,
&store_repo,
&store_secret,
storage,
)?;
log_debug!("ROOT_BRANCH COMMIT {}", root_branch_commit);
// adding the 2 events for the Repository and Rootbranch commits
//peer_last_seq_num += 1;
events.push(Event::new(
publisher_peer,
peer_last_seq_num,
&repository_commit,
&vec![],
topic_pub_key,
root_branch_commit.key().unwrap(),
&topic_priv_key,
storage,
)?);
//peer_last_seq_num += 1;
events.push(Event::new(
publisher_peer,
peer_last_seq_num,
&root_branch_commit,
&vec![],
topic_pub_key,
root_branch_commit.key().unwrap(),
&topic_priv_key,
storage,
)?);
// creating the main branch
let (main_branch_priv_key, main_branch_pub_key) = generate_keypair();
let (main_branch_topic_priv_key, main_branch_topic_pub_key) = generate_keypair();
let main_branch_commit_body = CommitBody::V0(CommitBodyV0::Branch(Branch::V0(BranchV0 {
id: main_branch_pub_key,
content_type: BranchContentType::None,
repo: repository_commit_ref.clone(),
root_branch_readcap_id: root_branch_commit.id().unwrap(),
topic: main_branch_topic_pub_key,
topic_privkey: Branch::encrypt_topic_priv_key(
&main_branch_topic_priv_key,
main_branch_topic_pub_key,
main_branch_pub_key,
&repo_write_cap_secret,
),
metadata: vec![],
})));
let main_branch_commit = Commit::new_with_body_acks_deps_and_save(
&main_branch_priv_key,
&main_branch_pub_key,
main_branch_pub_key,
QuorumType::NoSigning,
vec![],
vec![],
main_branch_commit_body,
&store_repo,
&store_secret,
storage,
)?;
log_debug!("MAIN BRANCH COMMIT {}", main_branch_commit);
// adding the event for the Branch commit
// peer_last_seq_num += 1;
events.push(Event::new(
publisher_peer,
peer_last_seq_num,
&main_branch_commit,
&vec![],
main_branch_topic_pub_key,
main_branch_commit.key().unwrap(),
&main_branch_topic_priv_key,
storage,
)?);
// creating the AddBranch commit (on root_branch), deps to the RootBranch commit
// author is the owner
let add_branch_commit_body =
CommitBody::V0(CommitBodyV0::AddBranch(AddBranch::V0(AddBranchV0 {
branch_type: BranchType::Main,
topic_id: main_branch_topic_pub_key,
branch_read_cap: main_branch_commit.reference().unwrap(),
})));
let add_branch_commit = Commit::new_with_body_acks_deps_and_save(
creator_priv_key,
creator,
repo_pub_key,
QuorumType::Owners,
vec![root_branch_commit.reference().unwrap()],
vec![],
add_branch_commit_body,
&store_repo,
&store_secret,
storage,
)?;
log_debug!("ADD_BRANCH COMMIT {}", add_branch_commit);
// TODO: optional AddMember and AddPermission, which should be added as deps to the SyncSignature below (and to the commits of the SignatureContent)
// using the creator as author (and incrementing their peer's seq_num)
// preparing the threshold keys for the unique owner
let mut rng = rand::thread_rng();
let sk_set = SecretKeySet::random(0, &mut rng);
let pk_set = sk_set.public_keys();
let sk_share = sk_set.secret_key_share(0);
// creating signature for RootBranch, AddBranch and Branch commits
// signed with owner threshold signature (threshold = 0)
let signature_content = SignatureContent::V0(SignatureContentV0 {
commits: vec![
root_branch_commit.id().unwrap(),
add_branch_commit.id().unwrap(),
main_branch_commit.id().unwrap(),
],
});
let signature_content_ser = serde_bare::to_vec(&signature_content).unwrap();
let sig_share = sk_share.sign(signature_content_ser);
let sig = pk_set
.combine_signatures([(0, &sig_share)])
.map_err(|_| NgError::IncompleteSignature)?;
let threshold_sig = ThresholdSignatureV0::Owners(sig);
// creating root certificate of the repo
let cert_content = CertificateContentV0 {
previous: repository_commit_ref,
readcap_id: root_branch_commit.id().unwrap(),
owners_pk_set: pk_set.public_key(),
orders_pk_sets: OrdersPublicKeySetsV0::None,
};
// signing the root certificate
let cert_content_ser = serde_bare::to_vec(&cert_content).unwrap();
let sig = sign(&repo_priv_key, &repo_pub_key, &cert_content_ser)?;
let cert_sig = CertificateSignatureV0::Repo(sig);
let cert = Certificate::V0(CertificateV0 {
content: cert_content,
sig: cert_sig,
});
// saving the certificate
let cert_object = Object::new(
ObjectContent::V0(ObjectContentV0::Certificate(cert)),
None,
0,
&store_repo,
&store_secret,
);
let mut cert_obj_blocks = cert_object.save(storage)?;
// finally getting the signature:
let signature = Signature::V0(SignatureV0 {
content: signature_content,
threshold_sig,
certificate_ref: cert_object.reference().unwrap(),
});
// saving the signature
let sig_object = Object::new(
ObjectContent::V0(ObjectContentV0::Signature(signature)),
None,
0,
&store_repo,
&store_secret,
);
let mut sig_obj_blocks = sig_object.save(storage)?;
// keeping the Secret Key Share of the owner
let signer_cap = SignerCap {
repo: repo_pub_key,
epoch: root_branch_commit.id().unwrap(),
owner: Some(threshold_crypto::serde_impl::SerdeSecret(sk_share)),
total_order: None,
partial_order: None,
};
let sync_signature = SyncSignature::V0(sig_object.reference().unwrap());
// creating the SyncSignature for the root_branch with deps to the AddBranch and acks to the RootBranch commit as it is its direct causal future.
let sync_sig_commit_body = CommitBody::V0(CommitBodyV0::SyncSignature(sync_signature));
let sync_sig_on_root_branch_commit = Commit::new_with_body_acks_deps_and_save(
creator_priv_key,
creator,
repo_pub_key,
QuorumType::IamTheSignature,
vec![add_branch_commit.reference().unwrap()],
vec![root_branch_commit.reference().unwrap()],
sync_sig_commit_body.clone(),
&store_repo,
&store_secret,
storage,
)?;
// adding the event for the sync_sig_on_root_branch_commit
let mut additional_blocks = Vec::with_capacity(
cert_obj_blocks.len() + sig_obj_blocks.len() + add_branch_commit.blocks().len(),
);
additional_blocks.extend(cert_obj_blocks.iter());
additional_blocks.extend(sig_obj_blocks.iter());
additional_blocks.extend(add_branch_commit.blocks().iter());
//peer_last_seq_num += 1;
events.push(Event::new(
publisher_peer,
peer_last_seq_num,
&sync_sig_on_root_branch_commit,
&additional_blocks,
topic_pub_key,
root_branch_commit.key().unwrap(),
&topic_priv_key,
storage,
)?);
// creating the SyncSignature for the main branch with deps to the Branch commit and acks also to this commit as it is its direct causal future.
let sync_sig_on_main_branch_commit = Commit::new_with_body_acks_deps_and_save(
creator_priv_key,
creator,
main_branch_pub_key,
QuorumType::IamTheSignature,
vec![main_branch_commit.reference().unwrap()],
vec![main_branch_commit.reference().unwrap()],
sync_sig_commit_body,
&store_repo,
&store_secret,
storage,
)?;
// adding the event for the sync_sig_on_main_branch_commit
let mut additional_blocks =
Vec::with_capacity(cert_obj_blocks.len() + sig_obj_blocks.len());
additional_blocks.append(&mut cert_obj_blocks);
additional_blocks.append(&mut sig_obj_blocks);
// peer_last_seq_num += 1;
events.push(Event::new(
publisher_peer,
peer_last_seq_num,
&sync_sig_on_main_branch_commit,
&additional_blocks,
main_branch_topic_pub_key,
main_branch_commit.key().unwrap(),
&main_branch_topic_priv_key,
storage,
)?);
// TODO: add the CertificateRefresh event on main branch
// += 1;
// preparing the Repo
let repo = Repo {
repo_def: repository,
signer: Some(signer_cap),
members: HashMap::new(),
storage,
};
Ok((repo, events))
impl Repo {
#[cfg(test)]
#[allow(deprecated)]
pub fn new_with_perms(perms: &[PermissionV0], store: Box<Store>) -> Self {
let pub_key = PubKey::nil();
Self::new_with_member(&pub_key, &pub_key, perms, OverlayId::dummy(), store)
}
pub fn new_with_member(
@ -468,7 +114,7 @@ impl<'a> Repo<'a> {
member: &UserId,
perms: &[PermissionV0],
overlay: OverlayId,
storage: &'a Box<dyn BlockStorage + Send + Sync + 'a>,
store: Box<Store>,
) -> Self {
let mut members = HashMap::new();
let permissions = HashMap::from_iter(
@ -487,20 +133,21 @@ impl<'a> Repo<'a> {
},
);
Self {
id: id.clone(),
repo_def: Repository::new(id, &vec![]),
members,
storage,
store,
signer: None,
}
}
pub fn verify_permission(&self, commit: &Commit) -> Result<(), NgError> {
let content_author = commit.content_v0().author;
let body = commit.load_body(&self.storage)?;
match self.members.get(&content_author) {
Some(info) => return info.has_any_perm(&body.required_permission()),
None => {}
}
// let body = commit.load_body(self.store.unwrap())?;
// match self.members.get(&content_author) {
// Some(info) => return info.has_any_perm(&body.required_permission()),
// None => {}
// }
Err(NgError::PermissionDenied)
}
@ -511,65 +158,7 @@ impl<'a> Repo<'a> {
}
}
pub fn get_storage(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
self.storage
}
}
#[cfg(test)]
mod test {
use crate::object::*;
use crate::repo::*;
struct Test<'a> {
storage: Box<dyn BlockStorage + Send + Sync + 'a>,
}
impl<'a> Test<'a> {
fn storage(s: impl BlockStorage + 'a) -> Self {
Test {
storage: Box::new(s),
}
}
fn s(&self) -> &Box<dyn BlockStorage + Send + Sync + 'a> {
&self.storage
}
}
#[test]
pub fn test_new_repo_default() {
let (creator_priv_key, creator_pub_key) = generate_keypair();
let (publisher_privkey, publisher_pubkey) = generate_keypair();
let publisher_peer = PeerId::Forwarded(publisher_pubkey);
let mut peer_last_seq_num = 10;
let (store_repo, store_secret) = StoreRepo::dummy_public_v0();
let hashmap_storage = HashMapBlockStorage::new();
let t = Test::storage(hashmap_storage);
let (repo, events) = Repo::new_default(
&creator_pub_key,
&creator_priv_key,
&publisher_privkey,
&mut peer_last_seq_num,
&store_repo,
&store_secret,
t.s(),
)
.expect("new_default");
log_debug!("REPO OBJECT {}", repo);
log_debug!("events: {}\n", events.len());
let mut i = 0;
for e in events {
log_debug!("========== EVENT {:03}: {}", i, e);
i += 1;
}
assert_eq!(peer_last_seq_num, 15);
}
// pub(crate) fn get_store(&self) -> &Store {
// self.store.unwrap()
// }
}

@ -0,0 +1,474 @@
/*
* Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
//! Store of a Site, Group, or Dialog
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use crate::block_storage::BlockStorage;
use crate::errors::{NgError, StorageError};
use crate::object::Object;
use crate::repo::Repo;
use crate::types::*;
use crate::utils::{generate_keypair, sign, verify};
use crate::log::*;
use rand::prelude::*;
use threshold_crypto::{SecretKeySet, SecretKeyShare};
pub struct Store {
store_repo: StoreRepo,
store_readcap: ReadCap,
overlay_id: OverlayId,
storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
//repos: HashMap<RepoId, Repo>,
}
impl Store {
pub fn get_store_repo(&self) -> &StoreRepo {
&self.store_repo
}
pub fn get_store_readcap(&self) -> &ReadCap {
&self.store_readcap
}
pub fn get_store_readcap_secret(&self) -> &ReadCapSecret {
&self.store_readcap.key
}
/// Load a block from the storage.
pub fn get(&self, id: &BlockId) -> Result<Block, StorageError> {
self.storage
.read()
.map_err(|_| StorageError::BackendError)?
.get(&self.overlay_id, id)
}
/// Save a block to the storage.
pub fn put(&self, block: &Block) -> Result<BlockId, StorageError> {
self.storage
.write()
.map_err(|_| StorageError::BackendError)?
.put(&self.overlay_id, block)
}
/// Delete a block from the storage.
pub fn del(&self, id: &BlockId) -> Result<usize, StorageError> {
self.storage
.write()
.map_err(|_| StorageError::BackendError)?
.del(&self.overlay_id, id)
}
/// Number of Blocks in the storage
pub fn len(&self) -> Result<usize, StorageError> {
self.storage
.read()
.map_err(|_| StorageError::BackendError)?
.len()
}
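A minimal round-trip sketch over this facade, assuming a Block value obtained elsewhere:

fn roundtrip(store: &Store, block: &Block) -> Result<(), StorageError> {
    let id = store.put(block)?; // saved under the store's own overlay id
    let _block = store.get(&id)?; // read back through the shared lock
    store.del(&id)?; // and deleted again
    Ok(())
}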
pub fn create_repo_default(
self: Box<Self>,
creator: &UserId,
creator_priv_key: &PrivKey,
) -> Result<(Repo, Vec<(Commit, Vec<Digest>)>), NgError> {
let mut events = Vec::with_capacity(6);
// creating the Repository commit
let (repo_priv_key, repo_pub_key) = generate_keypair();
//let overlay = store_repo.overlay_id_for_read_purpose();
let repository = Repository::V0(RepositoryV0 {
id: repo_pub_key,
verification_program: vec![],
creator: None,
metadata: vec![],
});
let repository_commit_body = CommitBody::V0(CommitBodyV0::Repository(repository.clone()));
let repository_commit = Commit::new_with_body_acks_deps_and_save(
&repo_priv_key,
&repo_pub_key,
repo_pub_key,
QuorumType::NoSigning,
vec![],
vec![],
repository_commit_body,
&self,
)?;
log_debug!("REPOSITORY COMMIT {}", repository_commit);
let repository_commit_ref = repository_commit.reference().unwrap();
let (topic_priv_key, topic_pub_key) = generate_keypair();
// creating the RootBranch commit, acks to Repository commit
let repo_write_cap_secret = SymKey::random();
let root_branch_commit_body =
CommitBody::V0(CommitBodyV0::RootBranch(RootBranch::V0(RootBranchV0 {
id: repo_pub_key,
repo: repository_commit_ref.clone(),
store: (&self.store_repo).into(),
store_sig: None, //TODO: the store signature
topic: topic_pub_key,
topic_privkey: Branch::encrypt_topic_priv_key(
&topic_priv_key,
topic_pub_key,
repo_pub_key,
&repo_write_cap_secret,
),
inherit_perms_users_and_quorum_from_store: None,
quorum: None,
reconciliation_interval: RelTime::None,
owners: vec![creator.clone()],
metadata: vec![],
})));
let root_branch_commit = Commit::new_with_body_acks_deps_and_save(
&repo_priv_key,
&repo_pub_key,
repo_pub_key,
QuorumType::NoSigning,
vec![],
vec![repository_commit_ref.clone()],
root_branch_commit_body,
&self,
)?;
log_debug!("ROOT_BRANCH COMMIT {}", root_branch_commit);
let root_branch_readcap = root_branch_commit.reference().unwrap();
let root_branch_readcap_id = root_branch_readcap.id;
// adding the 2 events for the Repository and Rootbranch commits
//peer_last_seq_num += 1;
events.push((repository_commit, vec![]));
// events.push(Event::new(
// publisher_peer,
// peer_last_seq_num,
// &repository_commit,
// &vec![],
// topic_pub_key,
// root_branch_commit.key().unwrap(),
// &topic_priv_key,
// store,
// )?);
//peer_last_seq_num += 1;
events.push((root_branch_commit, vec![]));
// events.push(Event::new(
// publisher_peer,
// peer_last_seq_num,
// &root_branch_commit,
// &vec![],
// topic_pub_key,
// root_branch_commit.key().unwrap(),
// &topic_priv_key,
// storage,
// )?);
// creating the main branch
let (main_branch_priv_key, main_branch_pub_key) = generate_keypair();
let (main_branch_topic_priv_key, main_branch_topic_pub_key) = generate_keypair();
let main_branch_commit_body = CommitBody::V0(CommitBodyV0::Branch(Branch::V0(BranchV0 {
id: main_branch_pub_key,
content_type: BranchContentType::None,
repo: repository_commit_ref.clone(),
root_branch_readcap_id,
topic: main_branch_topic_pub_key,
topic_privkey: Branch::encrypt_topic_priv_key(
&main_branch_topic_priv_key,
main_branch_topic_pub_key,
main_branch_pub_key,
&repo_write_cap_secret,
),
metadata: vec![],
})));
let main_branch_commit = Commit::new_with_body_acks_deps_and_save(
&main_branch_priv_key,
&main_branch_pub_key,
main_branch_pub_key,
QuorumType::NoSigning,
vec![],
vec![],
main_branch_commit_body,
&self,
)?;
let branch_read_cap = main_branch_commit.reference().unwrap();
let branch_read_cap_id = branch_read_cap.id;
log_debug!("MAIN BRANCH COMMIT {}", main_branch_commit);
// adding the event for the Branch commit
// peer_last_seq_num += 1;
events.push((main_branch_commit, vec![]));
// events.push(Event::new(
// publisher_peer,
// peer_last_seq_num,
// &main_branch_commit,
// &vec![],
// main_branch_topic_pub_key,
// main_branch_commit.key().unwrap(),
// &main_branch_topic_priv_key,
// storage,
// )?);
// creating the AddBranch commit (on root_branch), deps to the RootBranch commit
// author is the owner
let add_branch_commit_body =
CommitBody::V0(CommitBodyV0::AddBranch(AddBranch::V0(AddBranchV0 {
branch_type: BranchType::Main,
topic_id: main_branch_topic_pub_key,
branch_read_cap: branch_read_cap.clone(),
})));
let add_branch_commit = Commit::new_with_body_acks_deps_and_save(
creator_priv_key,
creator,
repo_pub_key,
QuorumType::Owners,
vec![root_branch_readcap.clone()],
vec![],
add_branch_commit_body,
&self,
)?;
log_debug!("ADD_BRANCH COMMIT {}", add_branch_commit);
// TODO: optional AddMember and AddPermission, which should be added as deps to the SyncSignature below (and to the commits of the SignatureContent)
// using the creator as author (and incrementing their peer's seq_num)
// preparing the threshold keys for the unique owner
let mut rng = rand::thread_rng();
let sk_set = SecretKeySet::random(0, &mut rng);
let pk_set = sk_set.public_keys();
let sk_share = sk_set.secret_key_share(0);
// creating signature for RootBranch, AddBranch and Branch commits
// signed with owner threshold signature (threshold = 0)
let signature_content = SignatureContent::V0(SignatureContentV0 {
commits: vec![
root_branch_readcap_id,
add_branch_commit.id().unwrap(),
branch_read_cap_id,
],
});
let signature_content_ser = serde_bare::to_vec(&signature_content).unwrap();
let sig_share = sk_share.sign(signature_content_ser);
let sig = pk_set
.combine_signatures([(0, &sig_share)])
.map_err(|_| NgError::IncompleteSignature)?;
let threshold_sig = ThresholdSignatureV0::Owners(sig);
// creating root certificate of the repo
let cert_content = CertificateContentV0 {
previous: repository_commit_ref,
readcap_id: root_branch_readcap_id,
owners_pk_set: pk_set.public_key(),
orders_pk_sets: OrdersPublicKeySetsV0::None,
};
// signing the root certificate
let cert_content_ser = serde_bare::to_vec(&cert_content).unwrap();
let sig = sign(&repo_priv_key, &repo_pub_key, &cert_content_ser)?;
let cert_sig = CertificateSignatureV0::Repo(sig);
let cert = Certificate::V0(CertificateV0 {
content: cert_content,
sig: cert_sig,
});
// saving the certificate
let cert_object = Object::new(
ObjectContent::V0(ObjectContentV0::Certificate(cert)),
None,
0,
&self,
);
let mut cert_obj_blocks = cert_object.save(&self)?;
// finally getting the signature:
let signature = Signature::V0(SignatureV0 {
content: signature_content,
threshold_sig,
certificate_ref: cert_object.reference().unwrap(),
});
// saving the signature
let sig_object = Object::new(
ObjectContent::V0(ObjectContentV0::Signature(signature)),
None,
0,
&self,
);
let mut sig_obj_blocks = sig_object.save(&self)?;
// keeping the Secret Key Share of the owner
let signer_cap = SignerCap {
repo: repo_pub_key,
epoch: root_branch_readcap_id,
owner: Some(threshold_crypto::serde_impl::SerdeSecret(sk_share)),
total_order: None,
partial_order: None,
};
let sync_signature = SyncSignature::V0(sig_object.reference().unwrap());
// creating the SyncSignature for the root_branch with deps to the AddBranch and acks to the RootBranch commit as it is its direct causal future.
let sync_sig_commit_body = CommitBody::V0(CommitBodyV0::SyncSignature(sync_signature));
let sync_sig_on_root_branch_commit = Commit::new_with_body_acks_deps_and_save(
creator_priv_key,
creator,
repo_pub_key,
QuorumType::IamTheSignature,
vec![add_branch_commit.reference().unwrap()],
vec![root_branch_readcap],
sync_sig_commit_body.clone(),
&self,
)?;
// adding the event for the sync_sig_on_root_branch_commit
let mut additional_blocks = Vec::with_capacity(
cert_obj_blocks.len() + sig_obj_blocks.len() + add_branch_commit.blocks().len(),
);
additional_blocks.extend(cert_obj_blocks.iter());
additional_blocks.extend(sig_obj_blocks.iter());
additional_blocks.extend(add_branch_commit.blocks().iter());
//peer_last_seq_num += 1;
events.push((sync_sig_on_root_branch_commit, additional_blocks));
// events.push(Event::new(
// publisher_peer,
// peer_last_seq_num,
// &sync_sig_on_root_branch_commit,
// &additional_blocks,
// topic_pub_key,
// root_branch_commit.key().unwrap(),
// &topic_priv_key,
// storage,
// )?);
// creating the SyncSignature for the main branch with deps to the Branch commit and acks also to this commit as it is its direct causal future.
let sync_sig_on_main_branch_commit = Commit::new_with_body_acks_deps_and_save(
creator_priv_key,
creator,
main_branch_pub_key,
QuorumType::IamTheSignature,
vec![branch_read_cap.clone()],
vec![branch_read_cap],
sync_sig_commit_body,
&self,
)?;
// adding the event for the sync_sig_on_main_branch_commit
let mut additional_blocks =
Vec::with_capacity(cert_obj_blocks.len() + sig_obj_blocks.len());
additional_blocks.append(&mut cert_obj_blocks);
additional_blocks.append(&mut sig_obj_blocks);
// peer_last_seq_num += 1;
events.push((sync_sig_on_main_branch_commit, additional_blocks));
// events.push(Event::new(
// publisher_peer,
// peer_last_seq_num,
// &sync_sig_on_main_branch_commit,
// &additional_blocks,
// main_branch_topic_pub_key,
// main_branch_commit.key().unwrap(),
// &main_branch_topic_priv_key,
// storage,
// )?);
// TODO: add the CertificateRefresh event on main branch
// += 1;
// preparing the Repo
let repo = Repo {
id: repo_pub_key,
repo_def: repository,
signer: Some(signer_cap),
members: HashMap::new(),
store: self,
};
//let repo_ref = self.repos.entry(repo_pub_key).or_insert(repo);
Ok((repo, events))
}
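A minimal sketch of driving this entry point from a test context, with the keypair generated on the fly:

let store = Store::dummy_public_v0();
let (creator_priv_key, creator_pub_key) = generate_keypair();
let (_repo, events) = store
    .create_repo_default(&creator_pub_key, &creator_priv_key)
    .expect("create_repo_default");
// each entry pairs a Commit with the ids of additional blocks to ship alongside it
for (commit, _additional_blocks) in events {
    log_debug!("COMMIT {}", commit);
}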
pub fn new(
store_repo: StoreRepo,
store_readcap: ReadCap,
storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
) -> Self {
Self {
store_repo,
store_readcap,
overlay_id: store_repo.overlay_id_for_storage_purpose(),
storage,
//repos: HashMap::new(),
}
}
#[cfg(test)]
#[allow(deprecated)]
pub fn dummy_public_v0() -> Box<Self> {
use crate::block_storage::HashMapBlockStorage;
let store_repo = StoreRepo::dummy_public_v0();
let store_readcap = ReadCap::dummy();
//let storage = Box::new() as Box<dyn BlockStorage + Send + Sync>;
Box::new(Self::new(
store_repo,
store_readcap,
Arc::new(RwLock::new(HashMapBlockStorage::new()))
as Arc<RwLock<dyn BlockStorage + Send + Sync>>,
))
}
#[cfg(test)]
pub fn dummy_with_key(repo_pubkey: PubKey) -> Box<Self> {
use crate::block_storage::HashMapBlockStorage;
let store_repo = StoreRepo::dummy_with_key(repo_pubkey);
let store_readcap = ReadCap::dummy();
//let storage = Box::new() as Box<dyn BlockStorage + Send + Sync>;
Box::new(Self::new(
store_repo,
store_readcap,
Arc::new(RwLock::new(HashMapBlockStorage::new()))
as Arc<RwLock<dyn BlockStorage + Send + Sync>>,
))
}
}

@ -669,18 +669,46 @@ impl StoreRepo {
}
#[cfg(test)]
#[allow(deprecated)]
pub fn dummy_public_v0() -> (Self, SymKey) {
let readcap = SymKey::dummy();
pub fn dummy_public_v0() -> Self {
let store_pubkey = PubKey::nil();
(
StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey)),
readcap,
)
StoreRepo::V0(StoreRepoV0::PublicStore(store_pubkey))
}
#[cfg(test)]
pub fn dummy_with_key(repo_pubkey: PubKey) -> Self {
StoreRepo::V0(StoreRepoV0::PublicStore(repo_pubkey))
}
pub fn overlay_id_for_read_purpose(&self) -> OverlayId {
let store_overlay: StoreOverlay = self.into();
store_overlay.overlay_id_for_read_purpose()
//let store_overlay: StoreOverlay = self.into();
//store_overlay.overlay_id_for_read_purpose()
OverlayId::outer(self.repo_id())
}
// pub fn overlay_id_for_storage_purpose(
// &self,
// store_overlay_branch_readcap_secret: Option<ReadCapSecret>,
// ) -> OverlayId {
// match self {
// Self::V0(StoreRepoV0::PublicStore(id))
// | Self::V0(StoreRepoV0::ProtectedStore(id))
// | Self::V0(StoreRepoV0::Group(id))
// | Self::V0(StoreRepoV0::PrivateStore(id)) => self.overlay_id_for_read_purpose(),
// Self::V0(StoreRepoV0::Dialog(d)) => OverlayId::inner(
// &d.0,
// store_overlay_branch_readcap_secret
// .expect("Dialog needs store_overlay_branch_readcap_secret"),
// ),
// }
// }
pub fn overlay_id_for_storage_purpose(&self) -> OverlayId {
match self {
Self::V0(StoreRepoV0::PublicStore(id))
| Self::V0(StoreRepoV0::ProtectedStore(id))
| Self::V0(StoreRepoV0::Group(id))
| Self::V0(StoreRepoV0::PrivateStore(id)) => self.overlay_id_for_read_purpose(),
Self::V0(StoreRepoV0::Dialog(d)) => OverlayId::Inner(d.1.clone()),
}
}
}
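A hypothetical illustration of the two overlay ids of a non-Dialog store, both of which now derive from the repo id:

let store_repo = StoreRepo::dummy_with_key(repo_pubkey); // test-only constructor; repo_pubkey assumed
let read = store_repo.overlay_id_for_read_purpose(); // OverlayId::outer(repo_id)
let storage = store_repo.overlay_id_for_storage_purpose(); // same value for any non-Dialog store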
@ -1628,7 +1656,7 @@ pub enum WalletUpdate {
V0(WalletUpdateV0),
}
/// Updates the ReadCap of the public and protected sites (and potentially also Group stores)
/// Updates the ReadCap of the public and protected sites, and of the Group and Dialog stores of the User
///
/// DEPS to the previous ones.
/// This is used to speed up joining the overlay of such stores, for new devices on new brokers
@ -1636,7 +1664,7 @@ pub enum WalletUpdate {
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct StoreUpdateV0 {
// id of the store.
pub id: PubKey,
pub store: StoreRepo,
pub store_read_cap: ReadCap,

File diff suppressed because it is too large

@ -11,13 +11,15 @@ use ng_repo::kcv_storage::*;
use ng_repo::errors::*;
use ng_repo::log::*;
use rocksdb::DBIteratorWithThreadMode;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use rocksdb::{
ColumnFamilyDescriptor, Direction, Env, ErrorKind, IteratorMode, Options, SingleThreaded,
TransactionDB, TransactionDBOptions, DB,
ColumnFamily, ColumnFamilyDescriptor, Direction, Env, ErrorKind, IteratorMode, Options,
SingleThreaded, TransactionDB, TransactionDBOptions, DB,
};
pub struct RocksdbTransaction<'a> {
@ -32,6 +34,24 @@ impl<'a> RocksdbTransaction<'a> {
fn tx(&self) -> &rocksdb::Transaction<'a, TransactionDB> {
self.tx.as_ref().unwrap()
}
fn get_iterator(
&self,
property_start: &[u8],
family: &Option<String>,
) -> Result<DBIteratorWithThreadMode<impl rocksdb::DBAccess + 'a>, StorageError> {
Ok(match family {
Some(cf) => self.tx().iterator_cf(
self.store
.db
.cf_handle(&cf)
.ok_or(StorageError::UnknownColumnFamily)?,
IteratorMode::From(property_start, Direction::Forward),
),
None => self
.tx()
.iterator(IteratorMode::From(property_start, Direction::Forward)),
})
}
}
impl<'a> ReadTransaction for RocksdbTransaction<'a> {
@ -41,18 +61,52 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> {
key_size: usize,
key_prefix: Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, StorageError> {
let property_start =
RocksdbKCVStore::calc_key_start(prefix, key_size, &key_prefix, &suffix);
let iter = self.get_iterator(&property_start, &family)?;
self.store
.get_all_keys_and_values(prefix, key_size, key_prefix, suffix)
.get_all_keys_and_values_(prefix, key_size, key_prefix, suffix, iter)
}
fn get_all_properties_of_key(
&self,
prefix: u8,
key: Vec<u8>,
properties: Vec<u8>,
family: &Option<String>,
) -> Result<HashMap<u8, Vec<u8>>, StorageError> {
let key_size = key.len();
let prop_values = self.get_all_keys_and_values(prefix, key_size, key, None, family)?;
Ok(RocksdbKCVStore::get_all_properties_of_key(
prop_values,
key_size,
&properties,
))
}
/// Load a single value property from the store.
fn get(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<Vec<u8>, StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, suffix);
let res = self
.tx()
.get_for_update(property, true)
.map_err(|_e| StorageError::BackendError)?;
fn get(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<u8>, StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, &suffix);
let res = match family {
Some(cf) => self.tx().get_for_update_cf(
self.store
.db
.cf_handle(&cf)
.ok_or(StorageError::UnknownColumnFamily)?,
property,
true,
),
None => self.tx().get_for_update(property, true),
}
.map_err(|_e| StorageError::BackendError)?;
match res {
Some(val) => Ok(val),
None => Err(StorageError::NotFound),
@ -65,6 +119,7 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> {
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<Vec<u8>>, StorageError> {
unimplemented!();
}
@ -76,21 +131,13 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> {
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, suffix);
let exists = self
.tx()
.get_for_update(property, true)
.map_err(|_e| StorageError::BackendError)?;
match exists {
Some(stored_value) => {
if stored_value.eq(value) {
Ok(())
} else {
Err(StorageError::DifferentValue)
}
}
None => Err(StorageError::NotFound),
let exists = self.get(prefix, key, suffix, family)?;
if exists.eq(value) {
Ok(())
} else {
Err(StorageError::DifferentValue)
}
}
}
@ -98,41 +145,61 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> {
impl<'a> WriteTransaction for RocksdbTransaction<'a> {
/// Save a property value to the store.
fn put(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, suffix);
self.tx()
.put(property, value)
.map_err(|_e| StorageError::BackendError)?;
let property = RocksdbKCVStore::compute_property(prefix, key, &suffix);
match family {
Some(cf) => self.tx().put_cf(
self.store
.db
.cf_handle(&cf)
.ok_or(StorageError::UnknownColumnFamily)?,
property,
value,
),
None => self.tx().put(property, value),
}
.map_err(|_e| StorageError::BackendError)?;
Ok(())
}
/// Replace the property of a key (single value) in the store.
fn replace(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, suffix);
self.tx()
.put(property, value)
.map_err(|_e| StorageError::BackendError)?;
Ok(())
self.put(prefix, key, suffix, value, family)
}
/// Delete a property from the store.
fn del(&mut self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, suffix);
let res = self.tx().delete(property);
fn del(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, &suffix);
let res = match family {
Some(cf) => self.tx().delete_cf(
self.store
.db
.cf_handle(&cf)
.ok_or(StorageError::UnknownColumnFamily)?,
property,
),
None => self.tx().delete(property),
};
if res.is_err() {
if let ErrorKind::NotFound = res.unwrap_err().kind() {
return Ok(());
@ -144,42 +211,35 @@ impl<'a> WriteTransaction for RocksdbTransaction<'a> {
/// Delete a specific value for a property from the store.
fn del_property_value(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, suffix);
let exists = self
.tx()
.get_for_update(property.clone(), true)
.map_err(|_e| StorageError::BackendError)?;
match exists {
Some(val) => {
if val.eq(value) {
self.tx()
.delete(property)
.map_err(|_e| StorageError::BackendError)?;
}
}
None => return Err(StorageError::DifferentValue),
let exists = self.get(prefix, key, suffix, family)?;
if exists.eq(value) {
self.del(prefix, key, suffix, family)
} else {
Err(StorageError::DifferentValue)
}
Ok(())
}
/// Delete all properties of a key from the store.
// TODO: this could be optimized with an iterator
fn del_all(
&mut self,
&self,
prefix: u8,
key: &Vec<u8>,
all_suffixes: &[u8],
family: &Option<String>,
) -> Result<(), StorageError> {
for suffix in all_suffixes {
self.del(prefix, key, Some(*suffix))?;
self.del(prefix, key, Some(*suffix), family)?;
}
if all_suffixes.is_empty() {
self.del(prefix, key, None)?;
self.del(prefix, key, None, family)?;
}
Ok(())
}
@ -187,7 +247,7 @@ impl<'a> WriteTransaction for RocksdbTransaction<'a> {
pub struct RocksdbKCVStore {
/// the main store where all the properties of keys are stored
main_db: TransactionDB,
db: TransactionDB,
/// path for the storage backend data
path: String,
}
@ -207,69 +267,57 @@ fn compare<T: Ord>(a: &[T], b: &[T]) -> std::cmp::Ordering {
}
impl ReadTransaction for RocksdbKCVStore {
/// returns a list of (key,value) that are in the range specified in the request
fn get_all_keys_and_values(
&self,
prefix: u8,
key_size: usize,
key_prefix: Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, StorageError> {
if key_prefix.len() > key_size {
return Err(StorageError::InvalidValue);
}
let mut vec_key_start = key_prefix.clone();
let mut trailing_zeros = vec![0u8; key_size - key_prefix.len()];
vec_key_start.append(&mut trailing_zeros);
let mut vec_key_end = key_prefix.clone();
let mut trailing_max = vec![255u8; key_size - key_prefix.len()];
vec_key_end.append(&mut trailing_max);
let property_start = Self::compute_property(prefix, &vec_key_start, suffix);
let property_end =
Self::compute_property(prefix, &vec_key_end, Some(suffix.unwrap_or(255u8)));
let property_start = Self::calc_key_start(prefix, key_size, &key_prefix, &suffix);
let iter = self.get_iterator(&property_start, &family)?;
self.get_all_keys_and_values_(prefix, key_size, key_prefix, suffix, iter)
}
let mut iter = self
.main_db
.iterator(IteratorMode::From(&property_start, Direction::Forward));
let mut vector: Vec<(Vec<u8>, Vec<u8>)> = vec![];
loop {
let res = iter.next();
match res {
Some(Ok(val)) => {
match compare(&val.0, property_end.as_slice()) {
std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {
if suffix.is_some() {
if val.0.len() < (key_size + 2)
|| val.0[1 + key_size] != suffix.unwrap()
{
continue;
}
// } else if val.0.len() > (key_size + 1) {
// continue;
}
vector.push((val.0.to_vec(), val.1.to_vec()));
}
_ => {} //,
}
}
Some(Err(_e)) => return Err(StorageError::BackendError),
None => {
break;
}
}
}
Ok(vector)
/// returns a map of found properties and their value. If `properties` is empty, then all the properties are returned.
/// Otherwise, only the properties in the list are returned (if found in backend storage)
fn get_all_properties_of_key(
&self,
prefix: u8,
key: Vec<u8>,
properties: Vec<u8>,
family: &Option<String>,
) -> Result<HashMap<u8, Vec<u8>>, StorageError> {
let key_size = key.len();
let prop_values = self.get_all_keys_and_values(prefix, key_size, key, None, family)?;
Ok(Self::get_all_properties_of_key(
prop_values,
key_size,
&properties,
))
}
/// Load a single value property from the store.
fn get(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<Vec<u8>, StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let res = self
.main_db
.get(property)
.map_err(|_e| StorageError::BackendError)?;
fn get(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<u8>, StorageError> {
let property = Self::compute_property(prefix, key, &suffix);
let res = match family {
Some(cf) => self.db.get_cf(
self.db
.cf_handle(&cf)
.ok_or(StorageError::UnknownColumnFamily)?,
property,
),
None => self.db.get(property),
}
.map_err(|_e| StorageError::BackendError)?;
match res {
Some(val) => Ok(val),
None => Err(StorageError::NotFound),
@ -282,6 +330,7 @@ impl ReadTransaction for RocksdbKCVStore {
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<Vec<u8>>, StorageError> {
unimplemented!();
}
@ -293,21 +342,13 @@ impl ReadTransaction for RocksdbKCVStore {
key: &Vec<u8>,
suffix: Option<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let exists = self
.main_db
.get(property)
.map_err(|_e| StorageError::BackendError)?;
match exists {
Some(stored_value) => {
if stored_value.eq(value) {
Ok(())
} else {
Err(StorageError::DifferentValue)
}
}
None => Err(StorageError::NotFound),
let exists = self.get(prefix, key, suffix, family)?;
if exists.eq(value) {
Ok(())
} else {
Err(StorageError::DifferentValue)
}
}
}
@ -317,7 +358,7 @@ impl KCVStore for RocksdbKCVStore {
&self,
method: &mut dyn FnMut(&mut dyn WriteTransaction) -> Result<(), StorageError>,
) -> Result<(), StorageError> {
let tx = self.main_db.transaction();
let tx = self.db.transaction();
let mut transaction = RocksdbTransaction {
store: self,
@ -330,16 +371,19 @@ impl KCVStore for RocksdbKCVStore {
}
res
}
}
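A usage sketch of this transactional wrapper (hypothetical caller; `store`, `PREFIX`, `key` and `value` are placeholders): both operations commit together, or the transaction rolls back on the first error.

    store.write_transaction(&mut |tx| {
        // both calls go through the same RocksdbTransaction, so they share one tx
        tx.put(PREFIX, &key, None, &value, &None)?;
        tx.del(PREFIX, &key, Some(1u8), &None)?;
        Ok(())
    })?;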
impl WriteTransaction for RocksdbKCVStore {
/// Save a property value to the store.
fn put(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
self.write_transaction(&mut |tx| tx.put(prefix, key, suffix, &value))
self.write_transaction(&mut |tx| tx.put(prefix, key, suffix, value, family))
}
/// Replace the property of a key (single value) in the store.
@ -348,14 +392,21 @@ impl KCVStore for RocksdbKCVStore {
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
self.write_transaction(&mut |tx| tx.replace(prefix, key, suffix, &value))
self.write_transaction(&mut |tx| tx.replace(prefix, key, suffix, value, family))
}
/// Delete a property from the store.
fn del(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError> {
self.write_transaction(&mut |tx| tx.del(prefix, key, suffix))
fn del(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
self.write_transaction(&mut |tx| tx.del(prefix, key, suffix, family))
}
/// Delete a specific value for a property from the store.
@ -364,20 +415,29 @@ impl KCVStore for RocksdbKCVStore {
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
self.write_transaction(&mut |tx| tx.del_property_value(prefix, key, suffix, &value))
self.write_transaction(&mut |tx| tx.del_property_value(prefix, key, suffix, value, family))
}
/// Delete all properties of a key from the store.
fn del_all(&self, prefix: u8, key: &Vec<u8>, all_suffixes: &[u8]) -> Result<(), StorageError> {
for suffix in all_suffixes {
self.del(prefix, key, Some(*suffix))?;
}
if all_suffixes.is_empty() {
self.del(prefix, key, None)?;
}
Ok(())
fn del_all(
&self,
prefix: u8,
key: &Vec<u8>,
all_suffixes: &[u8],
family: &Option<String>,
) -> Result<(), StorageError> {
self.write_transaction(&mut |tx| {
for suffix in all_suffixes {
tx.del(prefix, key, Some(*suffix), family)?;
}
if all_suffixes.is_empty() {
tx.del(prefix, key, None, family)?;
}
Ok(())
})
}
}
@ -386,7 +446,123 @@ impl RocksdbKCVStore {
PathBuf::from(&self.path)
}
fn compute_property(prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Vec<u8> {
fn get_all_properties_of_key(
prop_values: Vec<(Vec<u8>, Vec<u8>)>,
key_size: usize,
properties: &Vec<u8>,
) -> HashMap<u8, Vec<u8>> {
let mut res = HashMap::new();
for prop_val in prop_values {
let prop = prop_val.0[1 + key_size];
if properties.len() > 0 && !properties.contains(&prop) {
continue;
}
res.insert(prop, prop_val.1);
}
res
}
fn get_all_keys_and_values_(
&self,
prefix: u8,
key_size: usize,
key_prefix: Vec<u8>,
suffix: Option<u8>,
mut iter: DBIteratorWithThreadMode<'_, impl rocksdb::DBAccess>,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, StorageError> {
if key_prefix.len() > key_size {
return Err(StorageError::InvalidValue);
}
// let mut vec_key_start = key_prefix.clone();
// let mut trailing_zeros = vec![0u8; key_size - key_prefix.len()];
// vec_key_start.append(&mut trailing_zeros);
let mut vec_key_end = key_prefix.clone();
let mut trailing_max = vec![255u8; key_size - key_prefix.len()];
vec_key_end.append(&mut trailing_max);
// let property_start = Self::compute_property(prefix, &vec_key_start, suffix);
let property_end =
Self::compute_property(prefix, &vec_key_end, &Some(suffix.unwrap_or(255u8)));
// let mut iter = match family {
// Some(cf) => self.db.iterator_cf(
// self.db
// .cf_handle(&cf)
// .ok_or(StorageError::UnknownColumnFamily)?,
// IteratorMode::From(&property_start, Direction::Forward),
// ),
// None => self
// .db
// .iterator(IteratorMode::From(&property_start, Direction::Forward)),
// };
let mut vector: Vec<(Vec<u8>, Vec<u8>)> = vec![];
loop {
let res = iter.next();
match res {
Some(Ok(val)) => {
match compare(&val.0, property_end.as_slice()) {
std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {
if suffix.is_some() {
if val.0.len() < (key_size + 2)
|| val.0[1 + key_size] != suffix.unwrap()
{
continue;
}
// } else if val.0.len() > (key_size + 1) {
// continue;
}
vector.push((val.0.to_vec(), val.1.to_vec()));
}
_ => {} //,
}
}
Some(Err(_e)) => return Err(StorageError::BackendError),
None => {
break;
}
}
}
Ok(vector)
}
fn calc_key_start(
prefix: u8,
key_size: usize,
key_prefix: &Vec<u8>,
suffix: &Option<u8>,
) -> Vec<u8> {
let mut vec_key_start = key_prefix.clone();
let mut trailing_zeros = vec![0u8; key_size - key_prefix.len()];
vec_key_start.append(&mut trailing_zeros);
let mut vec_key_end = key_prefix.clone();
let mut trailing_max = vec![255u8; key_size - key_prefix.len()];
vec_key_end.append(&mut trailing_max);
Self::compute_property(prefix, &vec_key_start, suffix)
}
fn get_iterator(
&self,
property_start: &[u8],
family: &Option<String>,
) -> Result<DBIteratorWithThreadMode<'_, impl rocksdb::DBAccess>, StorageError> {
Ok(match family {
Some(cf) => self.db.iterator_cf(
self.db
.cf_handle(&cf)
.ok_or(StorageError::UnknownColumnFamily)?,
IteratorMode::From(property_start, Direction::Forward),
),
None => self
.db
.iterator(IteratorMode::From(property_start, Direction::Forward)),
})
}
fn compute_property(prefix: u8, key: &Vec<u8>, suffix: &Option<u8>) -> Vec<u8> {
let mut new: Vec<u8> = Vec::with_capacity(key.len() + 2);
new.push(prefix);
new.extend(key);
@ -412,7 +588,7 @@ impl RocksdbKCVStore {
log_info!("created db with Rocksdb Version: {}", Env::version());
Ok(RocksdbKCVStore {
main_db: db,
db: db,
path: path.to_str().unwrap().to_string(),
})
}
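For reference, every property key built by compute_property follows the layout [prefix byte | key bytes | optional suffix byte], and range scans bound the iterator with 0x00 / 0xFF padding. A standalone sketch of that layout (not part of the commit):

    // stand-alone illustration of the key layout and scan bounds
    fn compute_property(prefix: u8, key: &[u8], suffix: Option<u8>) -> Vec<u8> {
        let mut property = Vec::with_capacity(key.len() + 2);
        property.push(prefix);
        property.extend_from_slice(key);
        if let Some(s) = suffix {
            property.push(s);
        }
        property
    }

    fn main() {
        // scanning all 4-byte keys that start with [1, 2]:
        // calc_key_start pads the start of the range with zeros...
        let start = compute_property(b'p', &[1, 2, 0, 0], None);
        // ...and get_all_keys_and_values_ pads the end with 0xFF,
        // using suffix 255 so every suffixed property sorts inside the range
        let end = compute_property(b'p', &[1, 2, 255, 255], Some(255));
        assert!(start < end); // lexicographic byte order bounds the scan
    }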

@ -26,6 +26,11 @@ serde_bytes = "0.11.7"
oxigraph = { git = "https://git.nextgraph.org/NextGraph/oxigraph.git", branch="main" }
automerge = "0.5.9"
yrs = "0.18.2"
async-std = { version = "1.12.0", features = [ "attributes", "unstable" ] }
threshold_crypto = "0.4.0"
rand = { version = "0.7", features = ["getrandom"] }
web-time = "0.2.0"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ng-storage-rocksdb = { path = "../ng-storage-rocksdb", version = "0.1.0" }
getrandom = "0.2.7"

@ -2,5 +2,7 @@ pub mod types;
pub mod user_storage;
pub mod verifier;
#[cfg(not(target_family = "wasm"))]
pub mod rocksdb_user_storage;

@ -11,22 +11,29 @@
use core::fmt;
//use oxigraph::io::{RdfFormat, RdfParser, RdfSerializer};
use oxigraph::store::Store;
//use oxigraph::store::Store;
//use oxigraph::model::GroundQuad;
#[cfg(not(target_family = "wasm"))]
use crate::rocksdb_user_storage::RocksDbUserStorage;
use crate::user_storage::{InMemoryUserStorage, UserStorage};
use std::path::PathBuf;
use async_std::sync::Mutex;
use std::{collections::HashMap, path::PathBuf, sync::Arc};
use ng_net::{
connection::NoiseFSM,
errors::ProtocolError,
types::*,
utils::{Receiver, Sender},
};
use ng_repo::{
block_storage::BlockStorage,
errors::{NgError, StorageError},
file::RandomAccessFile,
store::Store,
types::*,
};
use serde::{Deserialize, Serialize};
use web_time::SystemTime;
//use yrs::{StateVector, Update};
#[derive(Debug, Clone)]
@ -40,6 +47,7 @@ pub enum VerifierType {
Remote(Option<PubKey>),
/// IndexedDB-based RocksDB compiled to WASM... not ready yet, obviously. Only works in the browser
WebRocksDb,
// Server: this type is for a Server Broker that acts as a verifier. It answers to VerifierType::Remote verifiers.
}
impl VerifierType {
@ -101,70 +109,59 @@ pub struct VerifierConfig {
pub type CancelFn = Box<dyn FnOnce()>;
pub struct Verifier {
pub config: VerifierConfig,
pub connected_server_id: Option<PubKey>,
graph_dataset: Option<Store>,
user_storage: Option<Box<dyn UserStorage>>,
}
//
// APP PROTOCOL (between APP and VERIFIER)
//
impl fmt::Debug for Verifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "Verifier\nconfig: {:?}", self.config)?;
writeln!(f, "connected_server_id: {:?}", self.connected_server_id)
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppFetchContentV0 {
Get, // more to be detailed
ReadQuery, // more to be detailed
WriteQuery, // more to be detailed
}
impl Verifier {
pub fn new(config: VerifierConfig) -> Result<Self, StorageError> {
let (graph, user) = match &config.config_type {
VerifierConfigType::Memory | VerifierConfigType::JsSaveSession(_) => (
Some(Store::new().unwrap()),
Some(Box::new(InMemoryUserStorage::new()) as Box<dyn UserStorage>),
),
#[cfg(not(target_family = "wasm"))]
VerifierConfigType::RocksDb(path) => (
// FIXME BIG TIME: we are reusing the same encryption key here.
// this is very temporary, until we remove the code in oxi_rocksdb of oxigraph,
// and have oxigraph use directly the UserStorage
Some(Store::open_with_key(path, config.user_master_key).unwrap()),
Some(
Box::new(RocksDbUserStorage::open(path, config.user_master_key)?)
as Box<dyn UserStorage>,
),
),
VerifierConfigType::Remote(_) => (None, None),
_ => unimplemented!(), // can be WebRocksDb or RocksDb on wasm platforms
};
Ok(Verifier {
config,
connected_server_id: None,
graph_dataset: graph,
user_storage: user,
})
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AppFetchV0 {
pub doc_id: RepoId,
pub fn doc_fetch(
&self,
nuri: String,
payload: Option<AppRequestPayload>,
) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
unimplemented!();
}
pub branch_id: Option<BranchId>,
pub store: StoreRepo,
pub content: AppFetchContentV0,
}
//
// APP PROTOCOL (between APP and VERIFIER)
//
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppRequestContentV0 {
FetchNuri,
Fetch(AppFetchV0),
Pin,
UnPin,
Delete,
Create,
FileGet, // needs the Nuri of branch/doc/store AND ObjectId
FilePut, // needs the Nuri of branch/doc/store
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AppRequestV0 {}
pub struct AppRequestV0 {
pub nuri: Option<String>,
pub content: AppRequestContentV0,
pub payload: Option<AppRequestPayload>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppRequest {
V0(AppRequestV0),
}
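A hedged construction sketch, assuming `doc_id: RepoId` and `store: StoreRepo` come from the caller's context:

    // hypothetical fetch request for a document
    let request = AppRequest::V0(AppRequestV0 {
        nuri: None,
        content: AppRequestContentV0::Fetch(AppFetchV0 {
            doc_id,
            branch_id: None, // assumption: None targets the default branch
            store,
            content: AppFetchContentV0::Get,
        }),
        payload: None,
    });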
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppQuery {
V0(String), // Sparql
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct GraphUpdate {
sparql_update: String,
@ -202,8 +199,12 @@ pub struct AppDelete {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppRequestPayloadV0 {
Create(AppCreate),
Query(AppQuery),
Update(AppUpdate),
Delete(AppDelete),
SmallFilePut(SmallFile),
RandomAccessFilePut(String), // content_type
RandomAccessFilePutChunk((ObjectId, Vec<u8>)), // end the upload with an empty vec
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -256,10 +257,20 @@ pub struct AppPatch {
discrete: Option<DiscretePatch>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FileName {
name: Option<String>,
reference: ObjectRef,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppResponseV0 {
State(AppState),
Patch(AppPatch),
Text(String),
File(FileName),
FileBinary(Vec<u8>),
QueryResult, // see sparesults
}
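A sketch of how a client might dispatch on these variants; the handler functions are hypothetical:

    // hypothetical client-side dispatch over an AppResponseV0
    match response {
        AppResponseV0::State(state) => render_state(state),
        AppResponseV0::Patch(patch) => apply_patch(patch),
        AppResponseV0::Text(text) => show_text(text),
        AppResponseV0::File(file_name) => download(file_name),
        AppResponseV0::FileBinary(bytes) => save_bytes(bytes),
        AppResponseV0::QueryResult => todo!("see sparesults"),
    }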
#[derive(Clone, Debug, Serialize, Deserialize)]

@ -0,0 +1,309 @@
// Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Verifier object (on heap) that handles the user's repos and verifies commits
use crate::types::*;
use ng_repo::log::*;
use ng_repo::object::Object;
use ng_repo::{
block_storage::BlockStorage,
errors::{NgError, StorageError},
file::RandomAccessFile,
repo::Repo,
store::Store,
types::*,
utils::{generate_keypair, sign},
};
use core::fmt;
//use oxigraph::io::{RdfFormat, RdfParser, RdfSerializer};
//use oxigraph::store::Store;
//use oxigraph::model::GroundQuad;
#[cfg(not(target_family = "wasm"))]
use crate::rocksdb_user_storage::RocksDbUserStorage;
use crate::user_storage::{InMemoryUserStorage, UserStorage};
use async_std::sync::Mutex;
use std::{collections::HashMap, path::PathBuf, sync::Arc};
use ng_net::{
connection::NoiseFSM,
errors::ProtocolError,
types::*,
utils::{Receiver, Sender},
};
use serde::{Deserialize, Serialize};
use web_time::SystemTime;
//use yrs::{StateVector, Update};
pub struct Verifier {
pub config: VerifierConfig,
pub connected_server_id: Option<PubKey>,
graph_dataset: Option<oxigraph::store::Store>,
user_storage: Option<Box<dyn UserStorage>>,
block_storage: Option<Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>>,
last_seq_num: u64,
peer_id: PubKey,
max_reserved_seq_num: u64,
last_reservation: SystemTime,
stores: HashMap<OverlayId, Store>,
repos: HashMap<RepoId, Repo>,
}
impl fmt::Debug for Verifier {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "Verifier\nconfig: {:?}", self.config)?;
writeln!(f, "connected_server_id: {:?}", self.connected_server_id)
}
}
impl Verifier {
#[cfg(test)]
pub fn new_dummy() -> Self {
let (peer_priv_key, peer_id) = generate_keypair();
let block_storage = Arc::new(std::sync::RwLock::new(HashMapBlockStorage::new()))
    as Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync + 'static>>;
Verifier {
config: VerifierConfig {
config_type: VerifierConfigType::Memory,
user_master_key: [0; 32],
peer_priv_key,
user_priv_key: PrivKey::random_ed(),
private_store_read_cap: ObjectRef::dummy(),
},
connected_server_id: None,
graph_dataset: None,
user_storage: None,
block_storage: Some(block_storage),
last_seq_num: 0,
peer_id,
max_reserved_seq_num: 1,
last_reservation: SystemTime::now(),
stores: HashMap::new(),
repos: HashMap::new(),
}
}
pub fn get_store(&mut self, store_repo: &StoreRepo) -> &mut Store {
let overlay_id = store_repo.overlay_id_for_storage_purpose();
if self.stores.get(&overlay_id).is_none() {
// FIXME: get store_readcap from user storage
let store_readcap = ReadCap::nil();
let store = Store::new(
*store_repo,
store_readcap,
Arc::clone(
&self
.block_storage
.as_ref()
.ok_or(core::fmt::Error)
.expect("get_store cannot be called on Remote Verifier"),
),
);
//self.stores.insert(overlay_id, store);
let store = self.stores.entry(overlay_id).or_insert(store);
store
} else {
self.stores.get_mut(&overlay_id).unwrap()
}
}
pub(crate) fn new_event(
&mut self,
//publisher: &PrivKey,
//seq: &mut u64,
commit: &Commit,
additional_blocks: &Vec<BlockId>,
//topic_id: TopicId,
//topic_priv_key: &BranchWriteCapSecret,
store: &Store, // store could be omitted and a store repo ID would be given instead.
) -> Result<Event, NgError> {
let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId)
let seq = self.last_seq_number()?;
Event::new(
&self.config.peer_priv_key,
seq,
commit,
additional_blocks,
topic_id,
&topic_priv_key,
store,
)
}
pub(crate) fn last_seq_number(&mut self) -> Result<u64, NgError> {
// reserve more if the next seq num would exceed the reserved range (also avoids u64 underflow)
if self.last_seq_num + 1 > self.max_reserved_seq_num {
self.reserve_more(1)?;
}
self.last_seq_num += 1;
Ok(self.last_seq_num)
}
pub(crate) fn new_events(
&mut self,
events: Vec<(Commit, Vec<Digest>)>,
store: &Store,
) -> Result<Vec<Event>, NgError> {
let missing_count = events.len() as i64 - self.available_seq_nums() as i64;
// this is reducing the capacity of reserved_seq_num by half (cast from u64 to i64)
// but we will never reach a situation where so many seq_nums are reserved, nor such a big list of events to process
if missing_count >= 0 {
self.reserve_more(missing_count as u64 + 1)?;
}
let mut res = vec![];
for event in events {
let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId)
self.last_seq_num += 1;
let event = Event::new(
&self.config.peer_priv_key,
self.last_seq_num,
&event.0,
&event.1,
topic_id,
&topic_priv_key,
store,
)?;
res.push(event);
}
Ok(res)
}
fn available_seq_nums(&self) -> u64 {
self.max_reserved_seq_num - self.last_seq_num
}
fn reserve_more(&mut self, at_least: u64) -> Result<(), NgError> {
// the qty should be calculated based on the last_reservation. the closer to now, the higher the qty.
// below 1 sec, => 100
// below 5 sec, => 10
// below 10 sec => 1
self.take_some_peer_last_seq_numbers(10)
}
fn take_some_peer_last_seq_numbers(&mut self, qty: u16) -> Result<(), NgError> {
// TODO the magic
Ok(())
}
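A sketch of the quantity heuristic described in reserve_more above; this is an assumption about how the TODO might be filled in, not the commit's implementation:

    // hypothetical helper: the more recently we reserved, the busier the peer,
    // so grab a bigger batch of seq nums next time
    fn reservation_qty(last_reservation: std::time::SystemTime) -> u16 {
        let secs = last_reservation
            .elapsed()
            .map(|d| d.as_secs())
            .unwrap_or(u64::MAX);
        match secs {
            0 => 100,    // below 1 sec => 100
            1..=4 => 10, // below 5 sec => 10
            _ => 1,      // 10 sec or more => 1
        }
    }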
pub fn new(
config: VerifierConfig,
block_storage: Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>,
) -> Result<Self, NgError> {
let (graph, user, block) = match &config.config_type {
VerifierConfigType::Memory | VerifierConfigType::JsSaveSession(_) => (
Some(oxigraph::store::Store::new().unwrap()),
Some(Box::new(InMemoryUserStorage::new()) as Box<dyn UserStorage>),
Some(block_storage),
),
#[cfg(not(target_family = "wasm"))]
VerifierConfigType::RocksDb(path) => (
// FIXME BIG TIME: we are reusing the same encryption key here.
// this is very temporary, until we remove the code in oxi_rocksdb of oxigraph,
// and have oxigraph use directly the UserStorage
Some(oxigraph::store::Store::open_with_key(path, config.user_master_key).unwrap()),
Some(
Box::new(RocksDbUserStorage::open(path, config.user_master_key)?)
as Box<dyn UserStorage>,
),
Some(block_storage),
),
VerifierConfigType::Remote(_) => (None, None, None),
_ => unimplemented!(), // can be WebRocksDb or RocksDb on wasm platforms
};
let peer_id = config.peer_priv_key.to_pub();
let mut verif = Verifier {
config,
connected_server_id: None,
graph_dataset: graph,
user_storage: user,
block_storage: block,
peer_id,
last_reservation: SystemTime::now(),
max_reserved_seq_num: 0,
last_seq_num: 0,
stores: HashMap::new(),
repos: HashMap::new(),
};
verif.take_some_peer_last_seq_numbers(1)?;
Ok(verif)
}
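A hedged construction example for an in-memory Verifier; the key-material bindings are placeholders, mirroring the fields used by new_dummy() above:

    let (peer_priv_key, _peer_id) = generate_keypair();
    let block_storage = Arc::new(std::sync::RwLock::new(HashMapBlockStorage::new()))
        as Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>;
    let verifier = Verifier::new(
        VerifierConfig {
            config_type: VerifierConfigType::Memory,
            user_master_key: [0; 32],
            peer_priv_key,
            user_priv_key: PrivKey::random_ed(),
            private_store_read_cap: ObjectRef::dummy(),
        },
        block_storage,
    )?;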
pub fn doc_fetch(
&mut self,
nuri: String,
payload: Option<AppRequestPayload>,
) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
unimplemented!();
}
pub async fn respond(
&mut self,
msg: ProtocolMessage,
fsm: Arc<Mutex<NoiseFSM>>,
) -> Result<(), ProtocolError> {
unimplemented!();
}
/// returns the Repo and the Events that need to be published
pub fn new_repo_default<'a>(
&'a mut self,
creator: &UserId,
creator_priv_key: &PrivKey,
//store_repo: &StoreRepo,
store: Box<Store>,
) -> Result<(&'a Repo, Vec<Event>), NgError> {
//let store = self.get_store(store_repo);
let (repo, proto_events) = store.create_repo_default(creator, creator_priv_key)?;
//repo.store = Some(store);
let events = self.new_events(proto_events, &repo.store)?;
let repo_ref = self.repos.entry(repo.id).or_insert(repo);
Ok((repo_ref, events))
}
}
#[cfg(test)]
mod test {
use crate::types::*;
use crate::verifier::*;
use ng_repo::log::*;
#[test]
pub fn test_new_repo_default() {
let (creator_priv_key, creator_pub_key) = generate_keypair();
let (publisher_privkey, publisher_pubkey) = generate_keypair();
let publisher_peer = PeerId::Forwarded(publisher_pubkey);
let store = Store::dummy_public_v0();
let mut verifier = Verifier::new_dummy();
//let store = verifier.get_store(store_repo);
let (repo, events) = verifier
.new_repo_default(&creator_pub_key, &creator_priv_key, store)
.expect("new_default");
log_debug!("REPO OBJECT {}", repo);
log_debug!("events: {}\n", events.len());
let mut i = 0;
for e in events {
log_debug!("========== EVENT {:03}: {}", i, e);
i += 1;
}
assert_eq!(verifier.last_seq_number().expect("last_seq_number"), 6);
}
}