/*
 * Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
 * All rights reserved.
 * Licensed under the Apache License, Version 2.0
 * <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
 * or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
 * at your option. All files in the project carrying such
 * notice may not be copied, modified, or distributed except
 * according to those terms.
 */

//! Store of a Site, or of a Group or Dialog

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use crate::block_storage::BlockStorage;
use crate::errors::{NgError, StorageError};
use crate::object::Object;
use crate::repo::Repo;
use crate::types::*;
use crate::utils::{generate_keypair, sign, verify};

use crate::log::*;

use rand::prelude::*;

use threshold_crypto::{SecretKeySet, SecretKeyShare};

pub struct Store {
    store_repo: StoreRepo,
    store_readcap: ReadCap,
    overlay_id: OverlayId,
    storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
    //repos: HashMap<RepoId, Repo>,
}

impl Store {
    pub fn get_store_repo(&self) -> &StoreRepo {
        &self.store_repo
    }

    pub fn get_store_readcap(&self) -> &ReadCap {
        &self.store_readcap
    }

    pub fn get_store_readcap_secret(&self) -> &ReadCapSecret {
        &self.store_readcap.key
    }

    /// Loads a block from the storage.
    pub fn get(&self, id: &BlockId) -> Result<Block, StorageError> {
        self.storage
            .read()
            .map_err(|_| StorageError::BackendError)?
            .get(&self.overlay_id, id)
    }

    /// Saves a block to the storage.
    pub fn put(&self, block: &Block) -> Result<BlockId, StorageError> {
        self.storage
            .write()
            .map_err(|_| StorageError::BackendError)?
            .put(&self.overlay_id, block)
    }

    /// Deletes a block from the storage.
    pub fn del(&self, id: &BlockId) -> Result<usize, StorageError> {
        self.storage
            .write()
            .map_err(|_| StorageError::BackendError)?
            .del(&self.overlay_id, id)
    }

    /// Returns the number of blocks in the storage.
    pub fn len(&self) -> Result<usize, StorageError> {
        self.storage
            .read()
            .map_err(|_| StorageError::BackendError)?
            .len()
    }
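
    // Hedged usage sketch (assuming an in-memory backend such as the test-only
    // HashMapBlockStorage, and some `block: Block` already built): blocks
    // round-trip through the keyspace of this store's overlay.
    //
    //     let store = Store::dummy_public_v0();
    //     let id = store.put(&block)?;  // saved under store.overlay_id
    //     let back = store.get(&id)?;   // read back by BlockId
    //     store.del(&id)?;              // delete, returns the size freed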

    pub fn create_repo_default(
        self: Box<Self>,
        creator: &UserId,
        creator_priv_key: &PrivKey,
    ) -> Result<(Repo, Vec<(Commit, Vec<Digest>)>), NgError> {
        let mut events = Vec::with_capacity(6);

        // creating the Repository commit

        let (repo_priv_key, repo_pub_key) = generate_keypair();

        //let overlay = store_repo.overlay_id_for_read_purpose();

        let repository = Repository::V0(RepositoryV0 {
            id: repo_pub_key,
            verification_program: vec![],
            creator: None,
            metadata: vec![],
        });

        let repository_commit_body = CommitBody::V0(CommitBodyV0::Repository(repository.clone()));

        let repository_commit = Commit::new_with_body_acks_deps_and_save(
            &repo_priv_key,
            &repo_pub_key,
            repo_pub_key,
            QuorumType::NoSigning,
            vec![],
            vec![],
            repository_commit_body,
            &self,
        )?;

        log_debug!("REPOSITORY COMMIT {}", repository_commit);

        let repository_commit_ref = repository_commit.reference().unwrap();

        let (topic_priv_key, topic_pub_key) = generate_keypair();

        // creating the RootBranch commit, acks to the Repository commit

        let repo_write_cap_secret = SymKey::random();

        let root_branch_commit_body =
            CommitBody::V0(CommitBodyV0::RootBranch(RootBranch::V0(RootBranchV0 {
                id: repo_pub_key,
                repo: repository_commit_ref.clone(),
                store: (&self.store_repo).into(),
                store_sig: None, //TODO: the store signature
                topic: topic_pub_key,
                topic_privkey: Branch::encrypt_topic_priv_key(
                    &topic_priv_key,
                    topic_pub_key,
                    repo_pub_key,
                    &repo_write_cap_secret,
                ),
                inherit_perms_users_and_quorum_from_store: None,
                quorum: None,
                reconciliation_interval: RelTime::None,
                owners: vec![creator.clone()],
                metadata: vec![],
            })));

        let root_branch_commit = Commit::new_with_body_acks_deps_and_save(
            &repo_priv_key,
            &repo_pub_key,
            repo_pub_key,
            QuorumType::NoSigning,
            vec![],
            vec![repository_commit_ref.clone()],
            root_branch_commit_body,
            &self,
        )?;

        log_debug!("ROOT_BRANCH COMMIT {}", root_branch_commit);
        let root_branch_readcap = root_branch_commit.reference().unwrap();
        let root_branch_readcap_id = root_branch_readcap.id;
        // adding the 2 events for the Repository and RootBranch commits

        //peer_last_seq_num += 1;
        events.push((repository_commit, vec![]));
        // events.push(Event::new(
        //     publisher_peer,
        //     peer_last_seq_num,
        //     &repository_commit,
        //     &vec![],
        //     topic_pub_key,
        //     root_branch_commit.key().unwrap(),
        //     &topic_priv_key,
        //     store,
        // )?);

        //peer_last_seq_num += 1;
        events.push((root_branch_commit, vec![]));
        // events.push(Event::new(
        //     publisher_peer,
        //     peer_last_seq_num,
        //     &root_branch_commit,
        //     &vec![],
        //     topic_pub_key,
        //     root_branch_commit.key().unwrap(),
        //     &topic_priv_key,
        //     storage,
        // )?);

        // creating the main branch

        let (main_branch_priv_key, main_branch_pub_key) = generate_keypair();

        let (main_branch_topic_priv_key, main_branch_topic_pub_key) = generate_keypair();

        let main_branch_commit_body = CommitBody::V0(CommitBodyV0::Branch(Branch::V0(BranchV0 {
            id: main_branch_pub_key,
            content_type: BranchContentType::None,
            repo: repository_commit_ref.clone(),
            root_branch_readcap_id,
            topic: main_branch_topic_pub_key,
            topic_privkey: Branch::encrypt_topic_priv_key(
                &main_branch_topic_priv_key,
                main_branch_topic_pub_key,
                main_branch_pub_key,
                &repo_write_cap_secret,
            ),
            metadata: vec![],
        })));

        let main_branch_commit = Commit::new_with_body_acks_deps_and_save(
            &main_branch_priv_key,
            &main_branch_pub_key,
            main_branch_pub_key,
            QuorumType::NoSigning,
            vec![],
            vec![],
            main_branch_commit_body,
            &self,
        )?;
        let branch_read_cap = main_branch_commit.reference().unwrap();
        let branch_read_cap_id = branch_read_cap.id;

        log_debug!("MAIN BRANCH COMMIT {}", main_branch_commit);

        // adding the event for the Branch commit

        // peer_last_seq_num += 1;
        events.push((main_branch_commit, vec![]));
        // events.push(Event::new(
        //     publisher_peer,
        //     peer_last_seq_num,
        //     &main_branch_commit,
        //     &vec![],
        //     main_branch_topic_pub_key,
        //     main_branch_commit.key().unwrap(),
        //     &main_branch_topic_priv_key,
        //     storage,
        // )?);

        // creating the AddBranch commit (on root_branch), deps to the RootBranch commit
        // author is the owner

        let add_branch_commit_body =
            CommitBody::V0(CommitBodyV0::AddBranch(AddBranch::V0(AddBranchV0 {
                branch_type: BranchType::Main,
                topic_id: main_branch_topic_pub_key,
                branch_read_cap: branch_read_cap.clone(),
            })));

        let add_branch_commit = Commit::new_with_body_acks_deps_and_save(
            creator_priv_key,
            creator,
            repo_pub_key,
            QuorumType::Owners,
            vec![root_branch_readcap.clone()],
            vec![],
            add_branch_commit_body,
            &self,
        )?;

        log_debug!("ADD_BRANCH COMMIT {}", add_branch_commit);

        // TODO: optional AddMember and AddPermission, which should be added as deps
        // to the SyncSignature below (and to the commits of the SignatureContent),
        // using the creator as author (and incrementing their peer's seq_num)

        // preparing the threshold keys for the unique owner
        let mut rng = rand::thread_rng();
        let sk_set = SecretKeySet::random(0, &mut rng);
        let pk_set = sk_set.public_keys();

        let sk_share = sk_set.secret_key_share(0);

        // creating the signature for the RootBranch, AddBranch and Branch commits,
        // signed with the owner's threshold signature (threshold = 0)

        let signature_content = SignatureContent::V0(SignatureContentV0 {
            commits: vec![
                root_branch_readcap_id,
                add_branch_commit.id().unwrap(),
                branch_read_cap_id,
            ],
        });

        let signature_content_ser = serde_bare::to_vec(&signature_content).unwrap();
        let sig_share = sk_share.sign(signature_content_ser);
        let sig = pk_set
            .combine_signatures([(0, &sig_share)])
            .map_err(|_| NgError::IncompleteSignature)?;
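
        // With SecretKeySet::random(0, ..) the threshold is 0, so combine_signatures
        // needs only threshold + 1 = 1 share: the single owner alone yields a valid
        // group signature. A sketch of the check this enables (illustrative, not part
        // of the flow):
        //
        //     let msg = serde_bare::to_vec(&signature_content).unwrap();
        //     debug_assert!(pk_set.public_key().verify(&sig, &msg));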

        let threshold_sig = ThresholdSignatureV0::Owners(sig);
|
||||
// creating root certificate of the repo
|
||||
|
||||
let cert_content = CertificateContentV0 { |
||||
previous: repository_commit_ref, |
||||
readcap_id: root_branch_readcap_id, |
||||
owners_pk_set: pk_set.public_key(), |
||||
orders_pk_sets: OrdersPublicKeySetsV0::None, |
||||
}; |
||||
|
||||
// signing the root certificate
|
||||
let cert_content_ser = serde_bare::to_vec(&cert_content).unwrap(); |
||||
let sig = sign(&repo_priv_key, &repo_pub_key, &cert_content_ser)?; |
||||
let cert_sig = CertificateSignatureV0::Repo(sig); |
||||
|
||||
let cert = Certificate::V0(CertificateV0 { |
||||
content: cert_content, |
||||
sig: cert_sig, |
||||
}); |
||||
// saving the certificate
|
||||
let cert_object = Object::new( |
||||
ObjectContent::V0(ObjectContentV0::Certificate(cert)), |
||||
None, |
||||
0, |
||||
&self, |
||||
); |
||||
let mut cert_obj_blocks = cert_object.save(&self)?; |
||||
|
||||
// finally getting the signature:
|
||||
|
||||
let signature = Signature::V0(SignatureV0 { |
||||
content: signature_content, |
||||
threshold_sig, |
||||
certificate_ref: cert_object.reference().unwrap(), |
||||
}); |

        // saving the signature
        let sig_object = Object::new(
            ObjectContent::V0(ObjectContentV0::Signature(signature)),
            None,
            0,
            &self,
        );
        let mut sig_obj_blocks = sig_object.save(&self)?;

        // keeping the Secret Key Share of the owner
        let signer_cap = SignerCap {
            repo: repo_pub_key,
            epoch: root_branch_readcap_id,
            owner: Some(threshold_crypto::serde_impl::SerdeSecret(sk_share)),
            total_order: None,
            partial_order: None,
        };

        let sync_signature = SyncSignature::V0(sig_object.reference().unwrap());

        // creating the SyncSignature for the root_branch, with deps to the AddBranch
        // and acks to the RootBranch commit, as it is its direct causal future.
        let sync_sig_commit_body = CommitBody::V0(CommitBodyV0::SyncSignature(sync_signature));

        let sync_sig_on_root_branch_commit = Commit::new_with_body_acks_deps_and_save(
            creator_priv_key,
            creator,
            repo_pub_key,
            QuorumType::IamTheSignature,
            vec![add_branch_commit.reference().unwrap()],
            vec![root_branch_readcap],
            sync_sig_commit_body.clone(),
            &self,
        )?;

        // adding the event for the sync_sig_on_root_branch_commit

        let mut additional_blocks = Vec::with_capacity(
            cert_obj_blocks.len() + sig_obj_blocks.len() + add_branch_commit.blocks().len(),
        );
        additional_blocks.extend(cert_obj_blocks.iter());
        additional_blocks.extend(sig_obj_blocks.iter());
        additional_blocks.extend(add_branch_commit.blocks().iter());

        //peer_last_seq_num += 1;
        events.push((sync_sig_on_root_branch_commit, additional_blocks));
        // events.push(Event::new(
        //     publisher_peer,
        //     peer_last_seq_num,
        //     &sync_sig_on_root_branch_commit,
        //     &additional_blocks,
        //     topic_pub_key,
        //     root_branch_commit.key().unwrap(),
        //     &topic_priv_key,
        //     storage,
        // )?);

        // creating the SyncSignature for the main branch, with deps to the Branch
        // commit and acks also to that commit, as it is its direct causal future.

        let sync_sig_on_main_branch_commit = Commit::new_with_body_acks_deps_and_save(
            creator_priv_key,
            creator,
            main_branch_pub_key,
            QuorumType::IamTheSignature,
            vec![branch_read_cap.clone()],
            vec![branch_read_cap],
            sync_sig_commit_body,
            &self,
        )?;

        // adding the event for the sync_sig_on_main_branch_commit

        let mut additional_blocks =
            Vec::with_capacity(cert_obj_blocks.len() + sig_obj_blocks.len());
        additional_blocks.append(&mut cert_obj_blocks);
        additional_blocks.append(&mut sig_obj_blocks);

        // peer_last_seq_num += 1;
        events.push((sync_sig_on_main_branch_commit, additional_blocks));
        // events.push(Event::new(
        //     publisher_peer,
        //     peer_last_seq_num,
        //     &sync_sig_on_main_branch_commit,
        //     &additional_blocks,
        //     main_branch_topic_pub_key,
        //     main_branch_commit.key().unwrap(),
        //     &main_branch_topic_priv_key,
        //     storage,
        // )?);

        // TODO: add the CertificateRefresh event on the main branch

        // preparing the Repo

        let repo = Repo {
            id: repo_pub_key,
            repo_def: repository,
            signer: Some(signer_cap),
            members: HashMap::new(),
            store: self,
        };

        //let repo_ref = self.repos.entry(repo_pub_key).or_insert(repo);
        Ok((repo, events))
    }

    pub fn new(
        store_repo: StoreRepo,
        store_readcap: ReadCap,
        storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
    ) -> Self {
        Self {
            store_repo,
            store_readcap,
            overlay_id: store_repo.overlay_id_for_storage_purpose(),
            storage,
            //repos: HashMap::new(),
        }
    }

    #[cfg(test)]
    #[allow(deprecated)]
    pub fn dummy_public_v0() -> Box<Self> {
        use crate::block_storage::HashMapBlockStorage;
        let store_repo = StoreRepo::dummy_public_v0();
        let store_readcap = ReadCap::dummy();
        //let storage = Box::new() as Box<dyn BlockStorage + Send + Sync>;
        Box::new(Self::new(
            store_repo,
            store_readcap,
            Arc::new(RwLock::new(HashMapBlockStorage::new()))
                as Arc<RwLock<dyn BlockStorage + Send + Sync>>,
        ))
    }

    #[cfg(test)]
    pub fn dummy_with_key(repo_pubkey: PubKey) -> Box<Self> {
        use crate::block_storage::HashMapBlockStorage;
        let store_repo = StoreRepo::dummy_with_key(repo_pubkey);
        let store_readcap = ReadCap::dummy();
        //let storage = Box::new() as Box<dyn BlockStorage + Send + Sync>;
        Box::new(Self::new(
            store_repo,
            store_readcap,
            Arc::new(RwLock::new(HashMapBlockStorage::new()))
                as Arc<RwLock<dyn BlockStorage + Send + Sync>>,
        ))
    }
}
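
// End-to-end sketch (hedged; it mirrors the test in the verifier crate): a dummy
// Store hands back the default Repo plus five proto-events, i.e. (Commit, Vec<Digest>)
// pairs, for the Repository, RootBranch, Branch and the two SyncSignature commits;
// the AddBranch commit travels as additional blocks of the first SyncSignature event.
//
//     let store = Store::dummy_public_v0();
//     let (creator_priv_key, creator_pub_key) = generate_keypair();
//     let (repo, proto_events) =
//         store.create_repo_default(&creator_pub_key, &creator_priv_key)?;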
// Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

//! Repo object (on heap) to handle a Repository

use crate::types::*;
use ng_repo::log::*;
use ng_repo::object::Object;
use ng_repo::{
    block_storage::BlockStorage,
    errors::{NgError, StorageError},
    file::RandomAccessFile,
    repo::Repo,
    store::Store,
    types::*,
    utils::{generate_keypair, sign},
};

use core::fmt;
//use oxigraph::io::{RdfFormat, RdfParser, RdfSerializer};
//use oxigraph::store::Store;
//use oxigraph::model::GroundQuad;
#[cfg(not(target_family = "wasm"))]
use crate::rocksdb_user_storage::RocksDbUserStorage;
use crate::user_storage::{InMemoryUserStorage, UserStorage};
use async_std::sync::Mutex;
use std::{collections::HashMap, path::PathBuf, sync::Arc};

use ng_net::{
    connection::NoiseFSM,
    errors::ProtocolError,
    types::*,
    utils::{Receiver, Sender},
};

use serde::{Deserialize, Serialize};
use web_time::SystemTime;
//use yrs::{StateVector, Update};

pub struct Verifier {
    pub config: VerifierConfig,
    pub connected_server_id: Option<PubKey>,
    graph_dataset: Option<oxigraph::store::Store>,
    user_storage: Option<Box<dyn UserStorage>>,
    block_storage: Option<Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>>,
    last_seq_num: u64,
    peer_id: PubKey,
    max_reserved_seq_num: u64,
    last_reservation: SystemTime,
    stores: HashMap<OverlayId, Store>,
    repos: HashMap<RepoId, Repo>,
}

impl fmt::Debug for Verifier {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "Verifier\nconfig: {:?}", self.config)?;
        writeln!(f, "connected_server_id: {:?}", self.connected_server_id)
    }
}

impl Verifier {
    #[cfg(test)]
    pub fn new_dummy() -> Self {
        use ng_repo::block_storage::HashMapBlockStorage;
        use std::sync::RwLock;

        let (peer_priv_key, peer_id) = generate_keypair();
        // unsize to the same trait-object type as the block_storage field
        let block_storage = Arc::new(RwLock::new(HashMapBlockStorage::new()))
            as Arc<RwLock<dyn BlockStorage + Send + Sync + 'static>>;
        Verifier {
            config: VerifierConfig {
                config_type: VerifierConfigType::Memory,
                user_master_key: [0; 32],
                peer_priv_key,
                user_priv_key: PrivKey::random_ed(),
                private_store_read_cap: ObjectRef::dummy(),
            },
            connected_server_id: None,
            graph_dataset: None,
            user_storage: None,
            block_storage: Some(block_storage),
            last_seq_num: 0,
            peer_id,
            max_reserved_seq_num: 1,
            last_reservation: SystemTime::now(),
            stores: HashMap::new(),
            repos: HashMap::new(),
        }
    }

    pub fn get_store(&mut self, store_repo: &StoreRepo) -> &mut Store {
        let overlay_id = store_repo.overlay_id_for_storage_purpose();
        if self.stores.get(&overlay_id).is_none() {
            // FIXME: get store_readcap from user storage
            let store_readcap = ReadCap::nil();
            let store = Store::new(
                *store_repo,
                store_readcap,
                Arc::clone(
                    self.block_storage
                        .as_ref()
                        .expect("get_store cannot be called on a Remote Verifier"),
                ),
            );
            //self.stores.insert(overlay_id, store);
            let store = self.stores.entry(overlay_id).or_insert(store);
            store
        } else {
            self.stores.get_mut(&overlay_id).unwrap()
        }
    }
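
    // Hedged usage sketch: one Store is lazily materialized per overlay, so
    // repeated calls with the same StoreRepo return the same entry, backed by
    // the verifier's shared block storage:
    //
    //     let store_repo = StoreRepo::dummy_public_v0();
    //     let store = verifier.get_store(&store_repo);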

    pub(crate) fn new_event(
        &mut self,
        //publisher: &PrivKey,
        //seq: &mut u64,
        commit: &Commit,
        additional_blocks: &Vec<BlockId>,
        //topic_id: TopicId,
        //topic_priv_key: &BranchWriteCapSecret,
        store: &Store, // store could be omitted and a store repo ID given instead.
    ) -> Result<Event, NgError> {
        let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
        let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on the repoId found in user storage (search by branchId)
        let seq = self.last_seq_number()?;
        Event::new(
            &self.config.peer_priv_key,
            seq,
            commit,
            additional_blocks,
            topic_id,
            &topic_priv_key,
            store,
        )
    }

    pub(crate) fn last_seq_number(&mut self) -> Result<u64, NgError> {
        // `+ 1`, not `- 1`: last_seq_num starts at 0 and the guard must not
        // underflow u64; reserve when the next number would exceed the reservation.
        if self.last_seq_num + 1 >= self.max_reserved_seq_num {
            self.reserve_more(1)?;
        }
        self.last_seq_num += 1;
        Ok(self.last_seq_num)
    }

    pub(crate) fn new_events(
        &mut self,
        events: Vec<(Commit, Vec<Digest>)>,
        store: &Store,
    ) -> Result<Vec<Event>, NgError> {
        let missing_count = events.len() as i64 - self.available_seq_nums() as i64;
        // the cast from u64 to i64 halves the usable range of reserved seq_nums,
        // but we will never reach a situation where so many seq_nums are reserved,
        // nor have such a big list of events to process
        if missing_count >= 0 {
            self.reserve_more(missing_count as u64 + 1)?;
        }
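
        // Worked example: with 5 events to wrap and available_seq_nums() == 1,
        // missing_count == 4 and reserve_more(5) is requested: the strict need
        // plus one spare, so a lone follow-up event costs no extra reservation.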
        let mut res = vec![];
        for event in events {
            let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
            let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on the repoId found in user storage (search by branchId)
            self.last_seq_num += 1;
            let event = Event::new(
                &self.config.peer_priv_key,
                self.last_seq_num,
                &event.0,
                &event.1,
                topic_id,
                &topic_priv_key,
                store,
            )?;
            res.push(event);
        }
        Ok(res)
    }

    fn available_seq_nums(&self) -> u64 {
        self.max_reserved_seq_num - self.last_seq_num
    }

    fn reserve_more(&mut self, at_least: u64) -> Result<(), NgError> {
        // the qty should be calculated based on last_reservation: the closer it is
        // to now, the higher the qty.
        // below 1 sec  => 100
        // below 5 sec  => 10
        // below 10 sec => 1
        self.take_some_peer_last_seq_numbers(10)
    }
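
    // A sketch of the policy described in reserve_more (hypothetical, under the
    // assumption that take_some_peer_last_seq_numbers will do the actual grant):
    //
    //     let elapsed = self.last_reservation.elapsed().unwrap_or_default().as_secs();
    //     let qty: u16 = if elapsed < 1 { 100 } else if elapsed < 5 { 10 } else { 1 };
    //     self.take_some_peer_last_seq_numbers(qty.max(at_least as u16))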

    fn take_some_peer_last_seq_numbers(&mut self, qty: u16) -> Result<(), NgError> {
        // TODO the magic

        Ok(())
    }

    pub fn new(
        config: VerifierConfig,
        block_storage: Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>,
    ) -> Result<Self, NgError> {
        let (graph, user, block) = match &config.config_type {
            VerifierConfigType::Memory | VerifierConfigType::JsSaveSession(_) => (
                Some(oxigraph::store::Store::new().unwrap()),
                Some(Box::new(InMemoryUserStorage::new()) as Box<dyn UserStorage>),
                Some(block_storage),
            ),
            #[cfg(not(target_family = "wasm"))]
            VerifierConfigType::RocksDb(path) => (
                // FIXME BIG TIME: we are reusing the same encryption key here.
                // this is very temporary, until we remove the code in oxi_rocksdb of
                // oxigraph and have oxigraph use the UserStorage directly
                Some(oxigraph::store::Store::open_with_key(path, config.user_master_key).unwrap()),
                Some(
                    Box::new(RocksDbUserStorage::open(path, config.user_master_key)?)
                        as Box<dyn UserStorage>,
                ),
                Some(block_storage),
            ),
            VerifierConfigType::Remote(_) => (None, None, None),
            _ => unimplemented!(), // can be WebRocksDb or RocksDb on wasm platforms
        };
        let peer_id = config.peer_priv_key.to_pub();
        let mut verif = Verifier {
            config,
            connected_server_id: None,
            graph_dataset: graph,
            user_storage: user,
            block_storage: block,
            peer_id,
            last_reservation: SystemTime::now(),
            max_reserved_seq_num: 0,
            last_seq_num: 0,
            stores: HashMap::new(),
            repos: HashMap::new(),
        };
        verif.take_some_peer_last_seq_numbers(1)?;
        Ok(verif)
    }

    pub fn doc_fetch(
        &mut self,
        nuri: String,
        payload: Option<AppRequestPayload>,
    ) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
        unimplemented!();
    }

    pub async fn respond(
        &mut self,
        msg: ProtocolMessage,
        fsm: Arc<Mutex<NoiseFSM>>,
    ) -> Result<(), ProtocolError> {
        unimplemented!();
    }

    /// Returns the new Repo and the Events to be published.
    pub fn new_repo_default<'a>(
        &'a mut self,
        creator: &UserId,
        creator_priv_key: &PrivKey,
        //store_repo: &StoreRepo,
        store: Box<Store>,
    ) -> Result<(&'a Repo, Vec<Event>), NgError> {
        //let store = self.get_store(store_repo);
        let (repo, proto_events) = store.create_repo_default(creator, creator_priv_key)?;

        //repo.store = Some(store);
        let events = self.new_events(proto_events, &repo.store)?;

        let repo_ref = self.repos.entry(repo.id).or_insert(repo);
        Ok((repo_ref, events))
    }
}

#[cfg(test)]
mod test {

    use crate::types::*;
    use crate::verifier::*;
    use ng_repo::log::*;
    use ng_repo::store::Store;
    use ng_repo::types::PeerId;
    use ng_repo::utils::generate_keypair;

    #[test]
    pub fn test_new_repo_default() {
        let (creator_priv_key, creator_pub_key) = generate_keypair();

        let (publisher_privkey, publisher_pubkey) = generate_keypair();
        let publisher_peer = PeerId::Forwarded(publisher_pubkey);

        let store = Store::dummy_public_v0();

        let mut verifier = Verifier::new_dummy();
        //let store = verifier.get_store(store_repo);

        let (repo, events) = verifier
            .new_repo_default(&creator_pub_key, &creator_priv_key, store)
            .expect("new_default");

        log_debug!("REPO OBJECT {}", repo);

        log_debug!("events: {}\n", events.len());
        let mut i = 0;
        for e in events {
            log_debug!("========== EVENT {:03}: {}", i, e);
            i += 1;
        }

        assert_eq!(verifier.last_seq_number().unwrap(), 6);
    }
}