user_storage keeps repos and stores. Events are sent to the broker on first connection.

pull/19/head
Niko PLP 8 months ago
parent 86e5ec52ef
commit 5fed085379
  1. 1
      Cargo.lock
  2. 1
      nextgraph/.gitignore
  3. 10
      nextgraph/examples/open.rs
  4. 1
      nextgraph/examples/persistent.rs
  5. 52
      nextgraph/src/local_broker.rs
  6. 5
      ng-app/src/routes/WalletCreate.svelte
  7. 14
      ng-broker/src/broker_storage/account.rs
  8. 13
      ng-broker/src/broker_storage/config.rs
  9. 12
      ng-broker/src/broker_storage/invitation.rs
  10. 8
      ng-broker/src/broker_storage/overlay.rs
  11. 13
      ng-broker/src/broker_storage/peer.rs
  12. 8
      ng-broker/src/broker_storage/topic.rs
  13. 6
      ng-broker/src/broker_storage/wallet.rs
  14. 72
      ng-broker/src/server_storage.rs
  15. 2
      ng-broker/src/server_ws.rs
  16. 5
      ng-net/src/actors/client/mod.rs
  17. 124
      ng-net/src/actors/client/pin_repo.rs
  18. 88
      ng-net/src/actors/client/repo_pin_status.rs
  19. 113
      ng-net/src/actors/client/topic_sub.rs
  20. 2
      ng-net/src/actors/mod.rs
  21. 32
      ng-net/src/broker.rs
  22. 3
      ng-net/src/errors.rs
  23. 24
      ng-net/src/server_storage.rs
  24. 297
      ng-net/src/types.rs
  25. 7
      ng-repo/src/errors.rs
  26. 11
      ng-repo/src/event.rs
  27. 12
      ng-repo/src/kcv_storage.rs
  28. 29
      ng-repo/src/repo.rs
  29. 12
      ng-repo/src/store.rs
  30. 33
      ng-repo/src/types.rs
  31. 30
      ng-storage-lmdb/src/kcv_storage.rs
  32. 8
      ng-storage-rocksdb/src/block_storage.rs
  33. 101
      ng-storage-rocksdb/src/kcv_storage.rs
  34. 1
      ng-verifier/Cargo.toml
  35. 37
      ng-verifier/src/rocksdb_user_storage.rs
  36. 27
      ng-verifier/src/site.rs
  37. 13
      ng-verifier/src/types.rs
  38. 173
      ng-verifier/src/user_storage/branch.rs
  39. 29
      ng-verifier/src/user_storage/mod.rs
  40. 352
      ng-verifier/src/user_storage/repo.rs
  41. 36
      ng-verifier/src/user_storage/storage.rs
  42. 390
      ng-verifier/src/verifier.rs
  43. 19
      ng-wallet/src/lib.rs
  44. 2
      ngcli/src/old.rs
  45. 6
      ngone/src/main.rs
  46. 8
      ngone/src/store/dynpeer.rs
  47. 11
      ngone/src/store/wallet_record.rs

1
Cargo.lock generated

@ -3419,6 +3419,7 @@ dependencies = [
"automerge",
"blake3",
"chacha20",
"either",
"getrandom 0.2.10",
"ng-net",
"ng-repo",

@ -0,0 +1 @@
tests

@ -37,7 +37,7 @@ async fn main() -> std::io::Result<()> {
}))
.await;
let wallet_name = "EJdLRVx93o3iUXoB0wSTqxh1-zYac-84vHb3oBbZ_HY".to_string();
let wallet_name = "hQK0RBKua5TUm2jqeSGPOMMzqplllAkbUgEh5P6Otf4".to_string();
// as we have previously saved the wallet,
// we can retrieve it, display the security phrase and image to the user, ask for the pazzle or mnemonic, and then open the wallet
@ -48,8 +48,8 @@ async fn main() -> std::io::Result<()> {
// now let's open the wallet, by providing the pazzle and PIN code
let opened_wallet = wallet_open_with_pazzle(
&wallet,
vec![117, 134, 59, 92, 98, 35, 70, 22, 9],
[1, 2, 1, 2],
vec![134, 54, 112, 46, 94, 65, 20, 2, 99],
[2, 3, 2, 3],
)?;
let user_id = opened_wallet.personal_identity();
@ -65,8 +65,8 @@ async fn main() -> std::io::Result<()> {
let status = user_connect(&user_id).await?;
// The connection cannot succeed because we misconfigured the core_bootstrap of the wallet. Its Peer ID is invalid.
let error_reason = status[0].3.as_ref().unwrap();
assert!(error_reason == "NoiseHandshakeFailed" || error_reason == "ConnectionError");
println!("Connection was : {:?}", status[0]);
//assert!(error_reason == "NoiseHandshakeFailed" || error_reason == "ConnectionError");
// Then we should disconnect
user_disconnect(&user_id).await?;

@ -44,7 +44,6 @@ async fn main() -> std::io::Result<()> {
// the peer_id should come from somewhere else.
// this is just given for the sake of an example
#[allow(deprecated)]
let peer_id_of_server_broker = PubKey::nil();
// Create your wallet

@ -168,6 +168,12 @@ impl LocalBrokerConfig {
_ => false,
}
}
pub fn is_persistent(&self) -> bool {
match self {
Self::BasePath(_) => true,
_ => false,
}
}
#[doc(hidden)]
pub fn is_js(&self) -> bool {
match self {
@ -702,7 +708,7 @@ impl LocalBroker {
key_material.as_slice(),
);
key_material.zeroize();
let verifier = Verifier::new(
let mut verifier = Verifier::new(
VerifierConfig {
config_type: broker.verifier_config_type_from_session_config(&config),
user_master_key: key,
@ -714,6 +720,10 @@ impl LocalBroker {
block_storage,
)?;
key.zeroize();
//load verifier from local_storage (if rocks_db)
let _ = verifier.load();
broker.opened_sessions_list.push(Some(Session {
config,
peer_key: session.peer_key.clone(),
@ -722,6 +732,7 @@ impl LocalBroker {
}));
let idx = broker.opened_sessions_list.len() - 1;
broker.opened_sessions.insert(user_id, idx as u8);
Ok(SessionInfo {
session_id: idx as u8,
user: user_id,
@ -892,7 +903,7 @@ pub async fn wallet_create_v0(params: CreateWalletV0) -> Result<CreateWalletResu
.unwrap();
let (mut res, site, brokers) =
create_wallet_second_step_v0(intermediate, &mut session.verifier)?;
create_wallet_second_step_v0(intermediate, &mut session.verifier).await?;
broker.wallets.get_mut(&res.wallet_name).unwrap().wallet = res.wallet.clone();
LocalBroker::wallet_save(&mut broker)?;
@ -1256,6 +1267,13 @@ pub async fn user_connect_with_device_info(
if tried.is_some() && tried.as_ref().unwrap().3.is_none() {
session.verifier.connected_server_id = Some(server_key);
// successful. we can stop here
// we immediately send the events present in the outbox
let res = session.verifier.send_outbox().await;
log_info!("SENDING EVENTS FROM OUTBOX: {:?}", res);
// TODO: load verifier from remote connection (if not RocksDb type)
break;
} else {
log_debug!("Failed connection {:?}", tried);
@ -1406,6 +1424,7 @@ mod test {
};
use ng_net::types::BootstrapContentV0;
use ng_wallet::{display_mnemonic, emojis::display_pazzle};
use std::fs::read_to_string;
use std::fs::{create_dir_all, File};
use std::io::BufReader;
use std::io::Read;
@ -1431,7 +1450,7 @@ mod test {
init_local_broker(Box::new(|| LocalBrokerConfig::InMemory)).await;
#[allow(deprecated)]
let peer_id = "X0nh-gOTGKSx0yL0LYJviOWRNacyqIzjQW_LKdK6opU";
let peer_id_of_server_broker = PubKey::nil();
let wallet_result = wallet_create_v0(CreateWalletV0 {
@ -1495,7 +1514,30 @@ mod test {
file.write_all(&ser).expect("write of opened_wallet file");
}
async fn init_session_for_test() -> (UserId, String) {
#[async_std::test]
async fn gen_opened_wallet_file_for_test() {
let wallet_file = read("tests/wallet.ngw").expect("read wallet file");
init_local_broker(Box::new(|| LocalBrokerConfig::InMemory)).await;
let wallet = wallet_read_file(wallet_file)
.await
.expect("wallet_read_file");
let pazzle_string = read_to_string("tests/wallet.pazzle").expect("read pazzle file");
let pazzle_words = pazzle_string.split(' ').map(|s| s.to_string()).collect();
let opened_wallet = wallet_open_with_pazzle_words(&wallet, &pazzle_words, [2, 3, 2, 3])
.expect("opening of wallet");
let mut file =
File::create("tests/opened_wallet.ngw").expect("open for write opened_wallet file");
let ser = serde_bare::to_vec(&opened_wallet).expect("serialization of opened wallet");
file.write_all(&ser).expect("write of opened_wallet file");
}
async fn import_session_for_test() -> (UserId, String) {
let wallet_file = read("tests/wallet.ngw").expect("read wallet file");
let opened_wallet_file = read("tests/opened_wallet.ngw").expect("read opened_wallet file");
let opened_wallet: SensitiveWallet =
@ -1523,7 +1565,7 @@ mod test {
#[async_std::test]
async fn import_wallet() {
let (user_id, wallet_name) = init_session_for_test().await;
let (user_id, wallet_name) = import_session_for_test().await;
let status = user_connect(&user_id).await.expect("user_connect");

@ -1527,7 +1527,10 @@
/>
then throw it away.<br /> The order of each image is important.<br
/>
Now click on "Continue to Login"<br /><br />
Now click on "Continue to Login."<br /><br />It is important that
you login with this wallet at least once from this device<br />
(while connected to the internet), so that your personal site is
created on your broker.<br /><br />
<a href="/wallet/login" use:link>
<button
tabindex="-1"

@ -16,7 +16,7 @@ use std::time::SystemTime;
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::log::*;
use ng_repo::types::UserId;
use serde_bare::{from_slice, to_vec};
@ -24,7 +24,7 @@ use serde_bare::{from_slice, to_vec};
pub struct Account<'a> {
/// User ID
id: UserId,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> Account<'a> {
@ -38,7 +38,7 @@ impl<'a> Account<'a> {
const ALL_CLIENT_PROPERTIES: [u8; 2] = [Self::INFO, Self::LAST_SEEN];
pub fn open(id: &UserId, store: &'a dyn KCVStore) -> Result<Account<'a>, StorageError> {
pub fn open(id: &UserId, store: &'a dyn KCVStorage) -> Result<Account<'a>, StorageError> {
let opening = Account {
id: id.clone(),
store,
@ -51,7 +51,7 @@ impl<'a> Account<'a> {
pub fn create(
id: &UserId,
admin: bool,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<Account<'a>, StorageError> {
let acc = Account {
id: id.clone(),
@ -73,7 +73,7 @@ impl<'a> Account<'a> {
#[allow(deprecated)]
pub fn get_all_users(
admins: bool,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<Vec<UserId>, StorageError> {
let size = to_vec(&UserId::nil())?.len();
let mut res: Vec<UserId> = vec![];
@ -248,7 +248,7 @@ mod test {
use ng_repo::errors::StorageError;
use ng_repo::types::*;
use ng_repo::utils::*;
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStore;
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStorage;
use std::fs;
use tempfile::Builder;
@ -261,7 +261,7 @@ mod test {
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut store = RocksdbKCVStore::open(root.path(), key).unwrap();
let mut store = RocksdbKCVStorage::open(root.path(), key).unwrap();
let user_id = PubKey::Ed25519PubKey([1; 32]);

@ -11,7 +11,7 @@
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::types::*;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
@ -24,7 +24,7 @@ pub enum ConfigMode {
}
pub struct Config<'a> {
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> Config<'a> {
@ -39,7 +39,7 @@ impl<'a> Config<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::MODE;
pub fn open(store: &'a dyn KCVStore) -> Result<Config<'a>, StorageError> {
pub fn open(store: &'a dyn KCVStorage) -> Result<Config<'a>, StorageError> {
let opening = Config { store };
if !opening.exists() {
return Err(StorageError::NotFound);
@ -48,7 +48,7 @@ impl<'a> Config<'a> {
}
pub fn get_or_create(
mode: &ConfigMode,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<Config<'a>, StorageError> {
match Self::open(store) {
Err(e) => {
@ -66,7 +66,10 @@ impl<'a> Config<'a> {
}
}
}
pub fn create(mode: &ConfigMode, store: &'a dyn KCVStore) -> Result<Config<'a>, StorageError> {
pub fn create(
mode: &ConfigMode,
store: &'a dyn KCVStorage,
) -> Result<Config<'a>, StorageError> {
let acc = Config { store };
if acc.exists() {
return Err(StorageError::BackendError);

@ -17,7 +17,7 @@ use std::time::SystemTime;
use ng_net::errors::ProtocolError;
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::types::SymKey;
use ng_repo::types::Timestamp;
use ng_repo::utils::now_timestamp;
@ -25,9 +25,9 @@ use serde_bare::from_slice;
use serde_bare::to_vec;
pub struct Invitation<'a> {
/// User ID
/// code
id: [u8; 32],
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> Invitation<'a> {
@ -45,7 +45,7 @@ impl<'a> Invitation<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::TYPE;
pub fn open(id: &[u8; 32], store: &'a dyn KCVStore) -> Result<Invitation<'a>, StorageError> {
pub fn open(id: &[u8; 32], store: &'a dyn KCVStorage) -> Result<Invitation<'a>, StorageError> {
let opening = Invitation {
id: id.clone(),
store,
@ -59,7 +59,7 @@ impl<'a> Invitation<'a> {
id: &InvitationCode,
expiry: u32,
memo: &Option<String>,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<Invitation<'a>, StorageError> {
let (code_type, code) = match id {
InvitationCode::Unique(c) => (0u8, c.slice()),
@ -88,7 +88,7 @@ impl<'a> Invitation<'a> {
}
pub fn get_all_invitations(
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
mut admin: bool,
mut unique: bool,
mut multi: bool,

@ -11,7 +11,7 @@
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::types::*;
use ng_repo::utils::now_timestamp;
use serde::{Deserialize, Serialize};
@ -27,7 +27,7 @@ pub struct OverlayMeta {
pub struct Overlay<'a> {
/// Overlay ID
id: OverlayId,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> Overlay<'a> {
@ -50,7 +50,7 @@ impl<'a> Overlay<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::SECRET;
pub fn open(id: &OverlayId, store: &'a dyn KCVStore) -> Result<Overlay<'a>, StorageError> {
pub fn open(id: &OverlayId, store: &'a dyn KCVStorage) -> Result<Overlay<'a>, StorageError> {
let opening = Overlay {
id: id.clone(),
store,
@ -64,7 +64,7 @@ impl<'a> Overlay<'a> {
id: &OverlayId,
secret: &SymKey,
repo: Option<PubKey>,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<Overlay<'a>, StorageError> {
let acc = Overlay {
id: id.clone(),

@ -11,7 +11,7 @@
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::types::*;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
@ -19,7 +19,7 @@ use serde_bare::{from_slice, to_vec};
pub struct Peer<'a> {
/// Peer ID
id: PeerId,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> Peer<'a> {
@ -33,7 +33,7 @@ impl<'a> Peer<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::VERSION;
pub fn open(id: &PeerId, store: &'a dyn KCVStore) -> Result<Peer<'a>, StorageError> {
pub fn open(id: &PeerId, store: &'a dyn KCVStorage) -> Result<Peer<'a>, StorageError> {
let opening = Peer {
id: id.clone(),
store,
@ -45,7 +45,7 @@ impl<'a> Peer<'a> {
}
pub fn update_or_create(
advert: &PeerAdvert,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<Peer<'a>, StorageError> {
let id = advert.peer();
match Self::open(id, store) {
@ -62,7 +62,10 @@ impl<'a> Peer<'a> {
}
}
}
pub fn create(advert: &PeerAdvert, store: &'a dyn KCVStore) -> Result<Peer<'a>, StorageError> {
pub fn create(
advert: &PeerAdvert,
store: &'a dyn KCVStorage,
) -> Result<Peer<'a>, StorageError> {
let id = advert.peer();
let acc = Peer {
id: id.clone(),

@ -11,7 +11,7 @@
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::types::*;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
@ -25,7 +25,7 @@ pub struct TopicMeta {
pub struct Topic<'a> {
/// Topic ID
id: TopicId,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> Topic<'a> {
@ -40,7 +40,7 @@ impl<'a> Topic<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::META;
pub fn open(id: &TopicId, store: &'a dyn KCVStore) -> Result<Topic<'a>, StorageError> {
pub fn open(id: &TopicId, store: &'a dyn KCVStorage) -> Result<Topic<'a>, StorageError> {
let opening = Topic {
id: id.clone(),
store,
@ -50,7 +50,7 @@ impl<'a> Topic<'a> {
}
Ok(opening)
}
pub fn create(id: &TopicId, store: &'a mut dyn KCVStore) -> Result<Topic<'a>, StorageError> {
pub fn create(id: &TopicId, store: &'a mut dyn KCVStorage) -> Result<Topic<'a>, StorageError> {
let acc = Topic {
id: id.clone(),
store,

@ -11,7 +11,7 @@
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::kcv_storage::WriteTransaction;
use ng_repo::log::*;
use ng_repo::types::*;
@ -19,7 +19,7 @@ use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
pub struct Wallet<'a> {
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> Wallet<'a> {
@ -37,7 +37,7 @@ impl<'a> Wallet<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::SYM_KEY;
pub fn open(store: &'a dyn KCVStore) -> Wallet<'a> {
pub fn open(store: &'a dyn KCVStorage) -> Wallet<'a> {
Wallet { store }
}
pub fn get_or_create_single_key(

@ -21,17 +21,17 @@ use crate::broker_storage::wallet::Wallet;
use crate::types::*;
use ng_net::errors::{ProtocolError, ServerError};
use ng_net::server_storage::*;
use ng_net::types::{BootstrapContentV0, InvitationCode, InvitationV0};
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::log::*;
use ng_repo::types::{PeerId, PubKey, SymKey};
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStore;
use ng_repo::types::*;
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStorage;
pub struct RocksdbServerStorage {
wallet_storage: RocksdbKCVStore,
accounts_storage: RocksdbKCVStore,
peers_storage: RocksdbKCVStore,
wallet_storage: RocksdbKCVStorage,
accounts_storage: RocksdbKCVStorage,
peers_storage: RocksdbKCVStorage,
peers_last_seq_path: PathBuf,
peers_last_seq: Mutex<HashMap<PeerId, u64>>,
}
@ -48,7 +48,7 @@ impl RocksdbServerStorage {
std::fs::create_dir_all(wallet_path.clone()).unwrap();
log_debug!("opening wallet DB");
//TODO redo the whole key passing mechanism in RKV so it uses zeroize all the way
let wallet_storage = RocksdbKCVStore::open(&wallet_path, master_key.slice().clone())?;
let wallet_storage = RocksdbKCVStorage::open(&wallet_path, master_key.slice().clone())?;
let wallet = Wallet::open(&wallet_storage);
// create/open the ACCOUNTS storage
@ -60,7 +60,7 @@ impl RocksdbServerStorage {
accounts_key = wallet.create_accounts_key()?;
std::fs::create_dir_all(accounts_path.clone()).unwrap();
let accounts_storage =
RocksdbKCVStore::open(&accounts_path, accounts_key.slice().clone())?;
RocksdbKCVStorage::open(&accounts_path, accounts_key.slice().clone())?;
let symkey = SymKey::random();
let invite_code = InvitationCode::Admin(symkey.clone());
let _ = Invitation::create(
@ -87,7 +87,8 @@ impl RocksdbServerStorage {
log_debug!("opening accounts DB");
std::fs::create_dir_all(accounts_path.clone()).unwrap();
//TODO redo the whole key passing mechanism in RKV so it uses zeroize all the way
let accounts_storage = RocksdbKCVStore::open(&accounts_path, accounts_key.slice().clone())?;
let accounts_storage =
RocksdbKCVStorage::open(&accounts_path, accounts_key.slice().clone())?;
// create/open the PEERS storage
log_debug!("opening peers DB");
@ -96,7 +97,7 @@ impl RocksdbServerStorage {
peers_path.push("peers");
std::fs::create_dir_all(peers_path.clone()).unwrap();
//TODO redo the whole key passing mechanism in RKV so it uses zeroize all the way
let peers_storage = RocksdbKCVStore::open(&peers_path, peers_key.slice().clone())?;
let peers_storage = RocksdbKCVStorage::open(&peers_path, peers_key.slice().clone())?;
// creates the path for peers_last_seq
let mut peers_last_seq_path = path.clone();
@ -198,4 +199,53 @@ impl ServerStorage for RocksdbServerStorage {
inv.del()?;
Ok(())
}
/// Returns the pin status of `repo` within `overlay`.
///
/// NOTE(review): stub — ignores actual storage state and always reports
/// "pinned, no outer exposure, no topic subscriptions"; `overlay` is unused.
fn get_repo_pin_status(
    &self,
    overlay: &OverlayId,
    repo: &RepoHash,
) -> Result<RepoPinStatus, ProtocolError> {
    //TODO: implement correctly !
    Ok(RepoPinStatus::V0(RepoPinStatusV0 {
        hash: repo.clone(),
        // only possible for RW overlays
        expose_outer: false,
        // list of topics that are subscribed to
        topics: vec![],
    }))
}
/// Pins `repo` in `overlay`, subscribing to the read-only topics and
/// registering as publisher on the read-write ones.
///
/// NOTE(review): stub — nothing is persisted; every requested topic is simply
/// echoed back as "opened" (via `Into<TopicSubRes>`), and `overlay`/`repo`
/// are ignored.
fn pin_repo(
    &self,
    overlay: &OverlayId,
    repo: &RepoHash,
    ro_topics: &Vec<TopicId>,
    rw_topics: &Vec<PublisherAdvert>,
) -> Result<RepoOpened, ProtocolError> {
    //TODO: implement correctly !
    let mut opened = Vec::with_capacity(ro_topics.len() + rw_topics.len());
    for topic in ro_topics {
        opened.push((*topic).into());
    }
    for topic in rw_topics {
        // `(*topic)` copies out of the shared ref — relies on these types
        // being Copy (TODO confirm for PublisherAdvert).
        opened.push((*topic).into());
    }
    Ok(opened)
}
/// Subscribes to `topic` of `repo` in `overlay`, optionally as a publisher.
///
/// NOTE(review): stub — no subscription state is stored; the response just
/// echoes the topic with empty known heads, and `overlay`/`repo` are ignored.
fn topic_sub(
    &self,
    overlay: &OverlayId,
    repo: &RepoHash,
    topic: &TopicId,
    publisher: Option<&PublisherAdvert>,
) -> Result<TopicSubRes, ProtocolError> {
    //TODO: implement correctly !
    Ok(TopicSubRes::V0(TopicSubResV0 {
        topic: topic.clone(),
        known_heads: vec![],
        // publisher flag merely reflects whether an advert was supplied
        publisher: publisher.is_some(),
    }))
}
}

@ -586,7 +586,7 @@ pub async fn run_server_accept_one(
// let master_key: [u8; 32] = [0; 32];
// std::fs::create_dir_all(root.path()).unwrap();
// log_debug!("data directory: {}", root.path().to_str().unwrap());
// let store = RocksdbKCVStore::open(root.path(), master_key);
// let store = RocksdbKCVStorage::open(root.path(), master_key);
let socket = TcpListener::bind(addrs.as_str()).await?;
log_debug!("Listening on {}", addrs.as_str());

@ -0,0 +1,5 @@
pub mod repo_pin_status;
pub mod pin_repo;
pub mod topic_sub;

@ -0,0 +1,124 @@
/*
* Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use crate::broker::{ServerConfig, BROKER};
use crate::connection::NoiseFSM;
use crate::types::*;
use crate::{actor::*, errors::ProtocolError, types::ProtocolMessage};
use async_std::sync::Mutex;
use ng_repo::log::*;
use ng_repo::repo::Repo;
use ng_repo::types::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
impl PinRepo {
    /// Returns the responder actor that services a `PinRepo` request on the broker side.
    pub fn get_actor(&self) -> Box<dyn EActor> {
        Actor::<PinRepo, RepoOpened>::new_responder()
    }

    /// Builds a `PinRepo` client request from a locally opened repo.
    ///
    /// Branches holding a topic private key are advertised read-write (a signed
    /// `PublisherAdvert` is produced for each); all other branches are requested
    /// as read-only topics.
    ///
    /// NOTE(review): the overlay is always requested as ReadWrite (inner + outer id)
    /// and `expose_outer` is hard-coded to false — confirm this is intended for all callers.
    pub fn from_repo(repo: &Repo, broker_id: &DirectPeerId) -> PinRepo {
        let overlay = OverlayAccess::ReadWrite((
            OverlayId::inner_from_store(&repo.store),
            OverlayId::outer(repo.store.id()),
        ));
        let mut rw_topics = Vec::with_capacity(repo.branches.len());
        let mut ro_topics = vec![];
        for (_, branch) in repo.branches.iter() {
            if let Some(privkey) = &branch.topic_priv_key {
                // Holding the topic private key is what grants publish rights:
                // sign a PublisherAdvert binding (topic, broker peer).
                rw_topics.push(PublisherAdvert::new(
                    branch.topic,
                    privkey.clone(),
                    *broker_id,
                ));
            } else {
                ro_topics.push(branch.topic);
            }
        }
        PinRepo::V0(PinRepoV0 {
            hash: repo.id.into(),
            overlay,
            // TODO: overlay_root_topic
            overlay_root_topic: None,
            expose_outer: false,
            peers: vec![],
            max_peer_count: 0,
            allowed_peers: vec![],
            ro_topics,
            rw_topics,
        })
    }
}
impl TryFrom<ProtocolMessage> for PinRepo {
    type Error = ProtocolError;

    /// Extracts a `PinRepo` request from an incoming protocol message.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        let content: ClientRequestContentV0 = msg.try_into()?;
        match content {
            ClientRequestContentV0::PinRepo(pin_repo) => Ok(pin_repo),
            other => {
                log_debug!("INVALID {:?}", other);
                Err(ProtocolError::InvalidValue)
            }
        }
    }
}
impl From<PinRepo> for ProtocolMessage {
    /// Wraps a `PinRepo` request into a client protocol message
    /// addressed to the overlay id the request targets.
    fn from(msg: PinRepo) -> ProtocolMessage {
        let PinRepo::V0(ref v0) = msg;
        let overlay = v0.overlay.overlay_id_for_client_protocol_purpose().clone();
        ProtocolMessage::from_client_request_v0(ClientRequestContentV0::PinRepo(msg), overlay)
    }
}
impl TryFrom<ProtocolMessage> for RepoOpened {
    type Error = ProtocolError;

    /// Extracts a `RepoOpened` response from an incoming protocol message.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        let content: ClientResponseContentV0 = msg.try_into()?;
        match content {
            ClientResponseContentV0::RepoOpened(opened) => Ok(opened),
            other => {
                log_debug!("INVALID {:?}", other);
                Err(ProtocolError::InvalidValue)
            }
        }
    }
}
impl From<RepoOpened> for ProtocolMessage {
    /// Wraps the `RepoOpened` response into a client response message.
    fn from(res: RepoOpened) -> ProtocolMessage {
        let content = ClientResponseContentV0::RepoOpened(res);
        content.into()
    }
}
impl Actor<'_, RepoPinStatusReq, RepoPinStatus> {}
#[async_trait::async_trait]
impl EActor for Actor<'_, PinRepo, RepoOpened> {
    /// Server-side handler: decodes the `PinRepo` request, delegates the pinning
    /// to the broker's server storage, and sends the `RepoOpened` response back
    /// through the connection FSM.
    async fn respond(
        &mut self,
        msg: ProtocolMessage,
        fsm: Arc<Mutex<NoiseFSM>>,
    ) -> Result<(), ProtocolError> {
        let req = PinRepo::try_from(msg)?;
        //TODO implement all the server side logic
        let broker = BROKER.read().await;
        let res = broker.get_server_storage()?.pin_repo(
            req.overlay(),
            req.hash(),
            req.ro_topics(),
            req.rw_topics(),
        )?;
        fsm.lock().await.send(res.into()).await?;
        Ok(())
    }
}

@ -0,0 +1,88 @@
/*
* Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use crate::broker::{ServerConfig, BROKER};
use crate::connection::NoiseFSM;
use crate::types::*;
use crate::{actor::*, errors::ProtocolError, types::ProtocolMessage};
use async_std::sync::Mutex;
use ng_repo::log::*;
use ng_repo::types::PubKey;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
impl RepoPinStatusReq {
    /// Returns the responder actor that services a `RepoPinStatusReq`
    /// on the broker side.
    pub fn get_actor(&self) -> Box<dyn EActor> {
        Actor::<RepoPinStatusReq, RepoPinStatus>::new_responder()
    }
}
impl TryFrom<ProtocolMessage> for RepoPinStatusReq {
    type Error = ProtocolError;

    /// Extracts a `RepoPinStatusReq` from an incoming protocol message.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        let content: ClientRequestContentV0 = msg.try_into()?;
        match content {
            ClientRequestContentV0::RepoPinStatusReq(req) => Ok(req),
            other => {
                log_debug!("INVALID {:?}", other);
                Err(ProtocolError::InvalidValue)
            }
        }
    }
}
impl From<RepoPinStatusReq> for ProtocolMessage {
    /// Wraps the request into a client protocol message addressed to its overlay.
    fn from(msg: RepoPinStatusReq) -> ProtocolMessage {
        let overlay = msg.overlay().clone();
        let content = ClientRequestContentV0::RepoPinStatusReq(msg);
        ProtocolMessage::from_client_request_v0(content, overlay)
    }
}
impl TryFrom<ProtocolMessage> for RepoPinStatus {
    type Error = ProtocolError;

    /// Extracts a `RepoPinStatus` response from an incoming protocol message.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        let content: ClientResponseContentV0 = msg.try_into()?;
        match content {
            ClientResponseContentV0::RepoPinStatus(status) => Ok(status),
            other => {
                log_debug!("INVALID {:?}", other);
                Err(ProtocolError::InvalidValue)
            }
        }
    }
}
impl From<RepoPinStatus> for ProtocolMessage {
    /// Wraps the pin-status response into a client response message.
    fn from(res: RepoPinStatus) -> ProtocolMessage {
        let content = ClientResponseContentV0::RepoPinStatus(res);
        content.into()
    }
}
// Marker impl for the (RepoPinStatusReq -> RepoPinStatus) actor pair.
impl Actor<'_, RepoPinStatusReq, RepoPinStatus> {}
#[async_trait::async_trait]
impl EActor for Actor<'_, RepoPinStatusReq, RepoPinStatus> {
    /// Server-side handler: decodes the request and replies with the pin status
    /// looked up in the broker's server storage.
    async fn respond(
        &mut self,
        msg: ProtocolMessage,
        fsm: Arc<Mutex<NoiseFSM>>,
    ) -> Result<(), ProtocolError> {
        let req = RepoPinStatusReq::try_from(msg)?;
        let broker = BROKER.read().await;
        let res = broker
            .get_server_storage()?
            .get_repo_pin_status(req.overlay(), req.hash())?;
        fsm.lock().await.send(res.into()).await?;
        Ok(())
    }
}

@ -0,0 +1,113 @@
/*
* Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use crate::broker::{ServerConfig, BROKER};
use crate::connection::NoiseFSM;
use crate::types::*;
use crate::{actor::*, errors::ProtocolError, types::ProtocolMessage};
use async_std::sync::Mutex;
use ng_repo::log::*;
use ng_repo::repo::{BranchInfo, Repo};
use ng_repo::types::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
impl TopicSub {
    /// Returns the responder actor that services a `TopicSub` request on the broker side.
    pub fn get_actor(&self) -> Box<dyn EActor> {
        Actor::<TopicSub, TopicSubRes>::new_responder()
    }

    /// only set broker_id if you want to be a publisher
    ///
    /// When both a broker id and the branch's topic private key are present, the
    /// subscription targets the inner overlay and carries a signed `PublisherAdvert`;
    /// otherwise it is a plain (read-only) subscription on the outer overlay.
    pub fn new(repo: &Repo, branch: &BranchInfo, broker_id: Option<&DirectPeerId>) -> TopicSub {
        let (overlay, publisher) = if broker_id.is_some() && branch.topic_priv_key.is_some() {
            (
                OverlayId::inner_from_store(&repo.store),
                Some(PublisherAdvert::new(
                    branch.topic,
                    branch.topic_priv_key.to_owned().unwrap(),
                    *broker_id.unwrap(),
                )),
            )
        } else {
            (OverlayId::outer(repo.store.id()), None)
        };
        TopicSub::V0(TopicSubV0 {
            repo_hash: repo.id.into(),
            overlay: Some(overlay),
            topic: branch.topic,
            publisher,
        })
    }
}
impl TryFrom<ProtocolMessage> for TopicSub {
    type Error = ProtocolError;

    /// Extracts a `TopicSub` request from an incoming protocol message.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        let content: ClientRequestContentV0 = msg.try_into()?;
        match content {
            ClientRequestContentV0::TopicSub(sub) => Ok(sub),
            other => {
                log_debug!("INVALID {:?}", other);
                Err(ProtocolError::InvalidValue)
            }
        }
    }
}
impl From<TopicSub> for ProtocolMessage {
    /// Wraps the request into a client protocol message addressed to its overlay.
    fn from(msg: TopicSub) -> ProtocolMessage {
        let overlay = msg.overlay().clone();
        let content = ClientRequestContentV0::TopicSub(msg);
        ProtocolMessage::from_client_request_v0(content, overlay)
    }
}
impl TryFrom<ProtocolMessage> for TopicSubRes {
    type Error = ProtocolError;

    /// Extracts a `TopicSubRes` response from an incoming protocol message.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        let content: ClientResponseContentV0 = msg.try_into()?;
        match content {
            ClientResponseContentV0::TopicSubRes(sub_res) => Ok(sub_res),
            other => {
                log_debug!("INVALID {:?}", other);
                Err(ProtocolError::InvalidValue)
            }
        }
    }
}
impl From<TopicSubRes> for ProtocolMessage {
    /// Wraps the subscription response into a client response message.
    fn from(res: TopicSubRes) -> ProtocolMessage {
        let content = ClientResponseContentV0::TopicSubRes(res);
        content.into()
    }
}
// Marker impl for the (TopicSub -> TopicSubRes) actor pair.
impl Actor<'_, TopicSub, TopicSubRes> {}
#[async_trait::async_trait]
impl EActor for Actor<'_, TopicSub, TopicSubRes> {
    /// Server-side handler: decodes the `TopicSub` request, registers the
    /// subscription through the broker's server storage, and replies with the
    /// resulting `TopicSubRes`.
    async fn respond(
        &mut self,
        msg: ProtocolMessage,
        fsm: Arc<Mutex<NoiseFSM>>,
    ) -> Result<(), ProtocolError> {
        let req = TopicSub::try_from(msg)?;
        //TODO implement all the server side logic
        let broker = BROKER.read().await;
        let res = broker.get_server_storage()?.topic_sub(
            req.overlay(),
            req.hash(),
            req.topic(),
            req.publisher(),
        )?;
        fsm.lock().await.send(res.into()).await?;
        Ok(())
    }
}

@ -26,3 +26,5 @@ pub use list_invitations::*;
pub mod connecting;
pub use connecting::*;
pub mod client;

@ -12,6 +12,7 @@
//! Broker singleton present in every instance of NextGraph (Client, Server, Core node)
use crate::actor::EActor;
use crate::actor::SoS;
use crate::connection::*;
use crate::errors::*;
use crate::server_storage::ServerStorage;
@ -431,7 +432,7 @@ impl<'a> Broker<'a> {
None => {}
}
}
pub fn remove_peer_id(&mut self, peer_id: X25519PrivKey, user: Option<PubKey>) {
fn remove_peer_id(&mut self, peer_id: X25519PrivKey, user: Option<PubKey>) {
let removed = self.peers.remove(&(user, peer_id));
match removed {
Some(info) => match info.connected {
@ -460,10 +461,10 @@ impl<'a> Broker<'a> {
// #[cfg(not(target_arch = "wasm32"))]
// pub fn test_storage(&self, path: PathBuf) {
// use ng_storage_rocksdb::kcv_store::RocksdbKCVStore;
// use ng_storage_rocksdb::kcv_store::RocksdbKCVStorage;
// let key: [u8; 32] = [0; 32];
// let test_storage = RocksdbKCVStore::open(&path, key);
// let test_storage = RocksdbKCVStorage::open(&path, key);
// match test_storage {
// Err(e) => {
// log_debug!("storage error {}", e);
@ -494,9 +495,6 @@ impl<'a> Broker<'a> {
server_storage: None,
disconnections_sender,
disconnections_receiver: Some(disconnections_receiver),
// last_seq_function: None,
// in_memory: true,
// base_path: None,
local_broker: None,
}
}
@ -918,11 +916,31 @@ impl<'a> Broker<'a> {
Ok(())
}
pub async fn request<
A: Into<ProtocolMessage> + std::fmt::Debug + Sync + Send + 'static,
B: TryFrom<ProtocolMessage, Error = ProtocolError> + std::fmt::Debug + Sync + Send + 'static,
>(
&self,
user: &UserId,
remote_peer_id: &DirectPeerId,
msg: A,
) -> Result<SoS<B>, ProtocolError> {
let bpi = self
.peers
.get(&(Some(*user), remote_peer_id.to_dh_slice()))
.ok_or(ProtocolError::InvalidValue)?;
if let PeerConnection::Client(cnx) = &bpi.connected {
cnx.request(msg).await
} else {
Err(ProtocolError::BrokerError)
}
}
pub fn take_disconnections_receiver(&mut self) -> Option<Receiver<String>> {
self.disconnections_receiver.take()
}
pub async fn close_peer_connection_x(&mut self, peer_id: X25519PubKey, user: Option<PubKey>) {
async fn close_peer_connection_x(&mut self, peer_id: X25519PubKey, user: Option<PubKey>) {
if let Some(peer) = self.peers.get_mut(&(user, peer_id)) {
match &mut peer.connected {
PeerConnection::Core(_) => {

@ -85,6 +85,9 @@ pub enum ProtocolError {
EncryptionError,
WhereIsTheMagic,
RepoAlreadyOpened,
False,
InvalidNonce,
} //MAX 949 ProtocolErrors

@ -15,7 +15,7 @@ use crate::{
errors::{ProtocolError, ServerError},
types::*,
};
use ng_repo::types::{PeerId, PubKey};
use ng_repo::types::*;
pub trait ServerStorage: Send + Sync {
fn get_user(&self, user_id: PubKey) -> Result<bool, ProtocolError>;
@ -38,4 +38,26 @@ pub trait ServerStorage: Send + Sync {
fn remove_invitation(&self, invite: [u8; 32]) -> Result<(), ProtocolError>;
fn next_seq_for_peer(&self, peer: &PeerId, seq: u64) -> Result<(), ServerError>;
fn get_repo_pin_status(
&self,
overlay: &OverlayId,
repo: &RepoHash,
) -> Result<RepoPinStatus, ProtocolError>;
fn pin_repo(
&self,
overlay: &OverlayId,
repo: &RepoHash,
ro_topics: &Vec<TopicId>,
rw_topics: &Vec<PublisherAdvert>,
) -> Result<RepoOpened, ProtocolError>;
fn topic_sub(
&self,
overlay: &OverlayId,
repo: &RepoHash,
topic: &TopicId,
publisher: Option<&PublisherAdvert>,
) -> Result<TopicSubRes, ProtocolError>;
}

@ -19,7 +19,7 @@ use crate::WS_PORT_ALTERNATE;
use crate::{actor::EActor, actors::*, errors::ProtocolError};
use core::fmt;
use ng_repo::errors::NgError;
use ng_repo::log::*;
use ng_repo::types::*;
use serde::{Deserialize, Serialize};
use std::{
@ -1238,6 +1238,13 @@ impl OverlayAccess {
Err(NgError::InvalidArgument)
}
}
/// Returns the overlay id that the client protocol should target for this
/// access mode: the inner overlay for read-write access, otherwise the single
/// overlay carried by the variant.
pub fn overlay_id_for_client_protocol_purpose(&self) -> &OverlayId {
    match self {
        Self::ReadWrite((inner_overlay, _outer_overlay)) => inner_overlay,
        Self::ReadOnly(overlay) => overlay,
        Self::WriteOnly(overlay) => overlay,
    }
}
}
/// Inner Overlay Link
@ -1472,6 +1479,29 @@ pub enum PublisherAdvert {
V0(PublisherAdvertV0),
}
use ng_repo::utils::sign;
impl PublisherAdvert {
    /// Creates and signs a V0 publisher advert binding `topic_id` to
    /// `broker_peer`.
    ///
    /// The advert content (broker peer id + topic id) is BARE-serialized and
    /// signed with the topic's private key (`topic_key`), proving publisher
    /// rights for this topic on that broker.
    ///
    /// Panics if serialization or signing fails (treated as programming
    /// errors here).
    pub fn new(
        topic_id: TopicId,
        topic_key: BranchWriteCapSecret,
        broker_peer: DirectPeerId,
    ) -> PublisherAdvert {
        let content = PublisherAdvertContentV0 {
            peer: broker_peer,
            topic: topic_id,
        };
        let content_ser = serde_bare::to_vec(&content).unwrap();
        // `sign` takes the private key and the matching public key
        // (here the topic id) — see ng_repo::utils::sign.
        let sig = sign(&topic_key, &topic_id, &content_ser).unwrap();
        PublisherAdvert::V0(PublisherAdvertV0 { content, sig })
    }

    /// Topic this advert is about.
    pub fn topic_id(&self) -> &TopicId {
        match self {
            Self::V0(v0) => &v0.content.topic,
        }
    }
}
/// Topic subscription request by a peer
///
/// Forwarded towards all publishers along subscription routing table entries
@ -2574,7 +2604,8 @@ impl OpenRepo {
/// Request to pin a repo on the broker.
///
/// When client will disconnect, the subscriptions and publisherAdvert of the topics will be remain active on the broker,
/// When the client disconnects, the subscriptions and PublisherAdverts of the topics will remain active on the broker.
/// replied with a RepoOpened
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PinRepoV0 {
/// Repo Hash
@ -2590,7 +2621,8 @@ pub struct PinRepoV0 {
pub expose_outer: bool,
/// Broker peers to connect to in order to join the overlay
/// If the repo has previously been opened (during the same session) or if it is a private overlay, then peers info can be omitted
/// If the repo has previously been opened (during the same session) or if it is a private overlay, then peers info can be omitted.
/// If there are no known peers in the overlay yet, vector is left empty (creation of a store, or repo in a store that is owned by user).
pub peers: Vec<PeerAdvert>,
/// Maximum number of peers to connect to for this overlay (only valid for an inner (RW/WO) overlay)
@ -2626,6 +2658,26 @@ impl PinRepo {
PinRepo::V0(o) => &o.peers,
}
}
/// Hash of the repo to pin.
pub fn hash(&self) -> &RepoHash {
    match self {
        PinRepo::V0(v0) => &v0.hash,
    }
}
/// Topics to subscribe to in read-only mode.
pub fn ro_topics(&self) -> &Vec<TopicId> {
    match self {
        PinRepo::V0(v0) => &v0.ro_topics,
    }
}
/// Signed publisher adverts for the topics to subscribe to as publisher.
pub fn rw_topics(&self) -> &Vec<PublisherAdvert> {
    match self {
        PinRepo::V0(v0) => &v0.rw_topics,
    }
}
/// Overlay id to target for this pin request, derived from the access mode.
pub fn overlay(&self) -> &OverlayId {
    match self {
        // Fix: `overlay_id_for_client_protocol_purpose` already returns
        // `&OverlayId`; the previous extra `&` built a `&&OverlayId` that
        // only compiled through deref coercion (clippy: needless_borrow).
        PinRepo::V0(o) => o.overlay.overlay_id_for_client_protocol_purpose(),
    }
}
}
/// Request to refresh the Pinning of a previously pinned repo.
@ -2692,6 +2744,9 @@ impl UnpinRepo {
pub struct RepoPinStatusReqV0 {
/// Repo Hash
pub hash: RepoHash,
#[serde(skip)]
pub overlay: Option<OverlayId>,
}
/// Request the status of pinning for a repo on the broker.
@ -2706,6 +2761,17 @@ impl RepoPinStatusReq {
RepoPinStatusReq::V0(o) => &o.hash,
}
}
/// Fills in the overlay field; called broker-side with the overlay id taken
/// from the enclosing ClientMessage (the field is `#[serde(skip)]`, so it is
/// never transmitted on the wire).
pub fn set_overlay(&mut self, overlay: OverlayId) {
    match self {
        Self::V0(v0) => v0.overlay = Some(overlay),
    }
}
/// Returns the overlay id of this request.
///
/// Panics if `set_overlay` has not been called first: the field is
/// `#[serde(skip)]` and therefore `None` right after deserialization.
pub fn overlay(&self) -> &OverlayId {
    match self {
        Self::V0(v0) => v0.overlay.as_ref().unwrap(),
    }
}
}
/// Response with the status of pinning for a repo on the broker. V0
@ -2717,11 +2783,8 @@ pub struct RepoPinStatusV0 {
/// only possible for RW overlays
pub expose_outer: bool,
/// list of topics that are subscribed to (not included the RW ones. see list just below)
pub ro_topics: Vec<TopicId>,
/// list of topics that are publisher
pub rw_topics: Vec<TopicId>,
/// list of topics that are subscribed to
pub topics: Vec<TopicSubRes>,
// TODO pub inbox_proof
// TODO pub signer_proof
@ -2739,21 +2802,36 @@ impl RepoPinStatus {
RepoPinStatus::V0(o) => &o.hash,
}
}
/// Returns true if `topic` is among the subscribed topics AND the
/// subscription carries publisher rights.
pub fn is_topic_subscribed_as_publisher(&self, topic: &TopicId) -> bool {
    match self {
        Self::V0(v0) => v0
            .topics
            .iter()
            // Bug fix: the original never compared against `topic`, so it
            // returned true for ANY topic as soon as one publisher
            // subscription existed on the repo.
            .any(|sub| sub.topic_id() == topic && sub.is_publisher()),
    }
}
}
/// Request subscription to a `Topic` of an already opened or pinned Repo
///
/// replied with a list of TopicSubRes containing the current heads that should be used to do a TopicSync
/// replied with a TopicSubRes containing the current heads that should be used to do a TopicSync
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct TopicSubV0 {
/// Topic to subscribe
pub topic: PubKey,
pub topic: TopicId,
/// Hash of the repo that was previously opened or pinned
pub repo_hash: RepoHash,
/// Publisher need to provide a signed `PublisherAdvert` for the PeerId of the broker
pub publisher: Option<PublisherAdvert>,
#[serde(skip)]
pub overlay: Option<OverlayId>,
}
/// Request subscription to a `Topic` of an already opened or pinned Repo
@ -2762,6 +2840,34 @@ pub enum TopicSub {
V0(TopicSubV0),
}
impl TopicSub {
    /// Overlay this subscription targets.
    ///
    /// Panics if `set_overlay` has not been called: the field is
    /// `#[serde(skip)]`, so it is `None` right after deserialization.
    pub fn overlay(&self) -> &OverlayId {
        match self {
            Self::V0(v0) => v0.overlay.as_ref().unwrap(),
        }
    }
    /// Hash of the repo that was previously opened or pinned.
    pub fn hash(&self) -> &RepoHash {
        match self {
            Self::V0(o) => &o.repo_hash,
        }
    }
    /// Topic to subscribe to.
    pub fn topic(&self) -> &TopicId {
        match self {
            Self::V0(o) => &o.topic,
        }
    }
    /// Signed advert proving publisher rights, when subscribing as publisher.
    pub fn publisher(&self) -> Option<&PublisherAdvert> {
        match self {
            Self::V0(o) => o.publisher.as_ref(),
        }
    }
    /// Fills in the overlay field; called broker-side with the overlay id
    /// taken from the enclosing ClientMessage.
    pub fn set_overlay(&mut self, overlay: OverlayId) {
        match self {
            Self::V0(v0) => v0.overlay = Some(overlay),
        }
    }
}
/// Request unsubscription from a `Topic` of an already opened or pinned Repo
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub struct TopicUnsubV0 {
@ -2953,6 +3059,18 @@ pub enum ClientRequestContentV0 {
BlocksPut(BlocksPut),
PublishEvent(Event),
}
impl ClientRequestContentV0 {
    /// Injects the overlay id (carried by the enclosing ClientMessage) into
    /// the request variants that need it broker-side.
    pub fn set_overlay(&mut self, overlay: OverlayId) {
        match self {
            ClientRequestContentV0::RepoPinStatusReq(a) => a.set_overlay(overlay),
            ClientRequestContentV0::TopicSub(a) => a.set_overlay(overlay),
            // Fix: `PinRepo(a)` left `a` unused (compiler warning). PinRepo
            // derives its overlay from its own `overlay` field (see
            // `PinRepo::overlay`), so nothing needs injecting.
            ClientRequestContentV0::PinRepo(_) => {}
            _ => unimplemented!(),
        }
    }
}
/// Broker overlay request
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ClientRequestV0 {
@ -2987,6 +3105,36 @@ impl ClientRequest {
ClientRequest::V0(o) => &o.content,
}
}
pub fn get_actor(&self) -> Box<dyn EActor> {
match self {
Self::V0(ClientRequestV0 { content, .. }) => match content {
ClientRequestContentV0::RepoPinStatusReq(r) => r.get_actor(),
_ => unimplemented!(),
},
}
}
}
impl TryFrom<ProtocolMessage> for ClientRequestContentV0 {
    type Error = ProtocolError;
    /// Extracts the request payload from a ClientMessage envelope, injecting
    /// the envelope's overlay id into the payload before returning it.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        if let ProtocolMessage::ClientMessage(ClientMessage::V0(ClientMessageV0 {
            overlay,
            content:
                ClientMessageContentV0::ClientRequest(ClientRequest::V0(ClientRequestV0 {
                    mut content,
                    ..
                })),
            ..
        })) = msg
        {
            content.set_overlay(overlay);
            Ok(content)
        } else {
            log_debug!("INVALID {:?}", msg);
            Err(ProtocolError::InvalidValue)
        }
    }
}
/// Response which blocks have been found locally. V0
@ -3022,8 +3170,9 @@ impl BlocksFound {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct TopicSubResV0 {
/// Topic subscribed
pub topic: PubKey,
pub topic: TopicId,
pub known_heads: Vec<ObjectId>,
pub publisher: bool,
}
/// Topic subscription response
@ -3034,12 +3183,47 @@ pub enum TopicSubRes {
V0(TopicSubResV0),
}
impl TopicSubRes {
    /// Id of the topic this subscription result refers to.
    pub fn topic_id(&self) -> &TopicId {
        match self {
            Self::V0(res) => &res.topic,
        }
    }

    /// Whether the subscription was granted with publisher rights.
    pub fn is_publisher(&self) -> bool {
        match self {
            Self::V0(res) => res.publisher,
        }
    }
}
impl From<TopicId> for TopicSubRes {
    /// Builds a read-only subscription result: no known heads, not publisher.
    fn from(topic: TopicId) -> Self {
        let res = TopicSubResV0 {
            topic,
            known_heads: Vec::new(),
            publisher: false,
        };
        TopicSubRes::V0(res)
    }
}
impl From<PublisherAdvert> for TopicSubRes {
    /// Builds a publisher subscription result for the advert's topic.
    fn from(advert: PublisherAdvert) -> Self {
        let res = TopicSubResV0 {
            topic: advert.topic_id().clone(),
            known_heads: Vec::new(),
            publisher: true,
        };
        TopicSubRes::V0(res)
    }
}
pub type RepoOpened = Vec<TopicSubRes>;
/// Content of `ClientResponseV0`
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum ClientResponseContentV0 {
EmptyResponse,
Block(Block),
RepoOpened(Vec<TopicSubRes>),
RepoOpened(RepoOpened),
TopicSubRes(TopicSubRes),
TopicSyncRes(TopicSyncRes),
BlocksFound(BlocksFound),
@ -3065,6 +3249,16 @@ pub enum ClientResponse {
V0(ClientResponseV0),
}
impl From<ProtocolError> for ClientResponse {
    /// Builds an error response: id 0, empty payload, with the numeric error
    /// code carried in `result`.
    fn from(err: ProtocolError) -> ClientResponse {
        let v0 = ClientResponseV0 {
            id: 0,
            result: err.into(),
            content: ClientResponseContentV0::EmptyResponse,
        };
        ClientResponse::V0(v0)
    }
}
impl ClientResponse {
pub fn id(&self) -> i64 {
match self {
@ -3093,6 +3287,31 @@ impl ClientResponse {
}
}
impl TryFrom<ProtocolMessage> for ClientResponseContentV0 {
    type Error = ProtocolError;
    /// Extracts the response payload from a ClientMessage envelope.
    ///
    /// A zero `result` code yields `Ok(payload)`; a non-zero code is decoded
    /// back into the corresponding `ProtocolError` and returned as `Err`.
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        if let ProtocolMessage::ClientMessage(ClientMessage::V0(ClientMessageV0 {
            content:
                ClientMessageContentV0::ClientResponse(ClientResponse::V0(ClientResponseV0 {
                    content,
                    result: res,
                    ..
                })),
            ..
        })) = msg
        {
            if res == 0 {
                Ok(content)
            } else {
                // Robustness fix: an unknown error code sent by a remote peer
                // must not panic this side (previously `.unwrap()`); fall back
                // to InvalidValue when the code cannot be decoded.
                Err(ProtocolError::try_from(res).unwrap_or(ProtocolError::InvalidValue))
            }
        } else {
            log_debug!("INVALID {:?}", msg);
            Err(ProtocolError::InvalidValue)
        }
    }
}
/// Content of `ClientMessageV0`
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum ClientMessageContentV0 {
@ -3198,6 +3417,19 @@ impl ClientMessage {
},
}
}
pub fn get_actor(&self) -> Box<dyn EActor> {
match self {
ClientMessage::V0(o) => match &o.content {
ClientMessageContentV0::ClientRequest(req) => req.get_actor(),
ClientMessageContentV0::ClientResponse(_)
| ClientMessageContentV0::ForwardedEvent(_)
| ClientMessageContentV0::ForwardedBlock(_) => {
panic!("it is not a request");
}
},
}
}
}
//
@ -3468,6 +3700,7 @@ impl ProtocolMessage {
match self {
//ProtocolMessage::Noise(a) => a.get_actor(),
ProtocolMessage::Start(a) => a.get_actor(),
ProtocolMessage::ClientMessage(a) => a.get_actor(),
// ProtocolMessage::ServerHello(a) => a.get_actor(),
// ProtocolMessage::ClientAuth(a) => a.get_actor(),
// ProtocolMessage::AuthResult(a) => a.get_actor(),
@ -3477,6 +3710,46 @@ impl ProtocolMessage {
_ => unimplemented!(),
}
}
pub fn from_client_response_err(err: ProtocolError) -> ProtocolMessage {
let res: ClientResponse = err.into();
res.into()
}
/// Builds a V0 client-request message (id 0, no padding) targeting `overlay`.
pub fn from_client_request_v0(
    req: ClientRequestContentV0,
    overlay: OverlayId,
) -> ProtocolMessage {
    let request = ClientRequest::V0(ClientRequestV0 {
        id: 0,
        content: req,
    });
    ProtocolMessage::ClientMessage(ClientMessage::V0(ClientMessageV0 {
        overlay,
        content: ClientMessageContentV0::ClientRequest(request),
        padding: vec![],
    }))
}
}
impl From<ClientResponseContentV0> for ProtocolMessage {
    /// Wraps a successful payload (result 0, id 0) into a full
    /// ProtocolMessage, via the `ClientResponse` conversion.
    fn from(msg: ClientResponseContentV0) -> ProtocolMessage {
        ClientResponse::V0(ClientResponseV0 {
            id: 0,
            result: 0,
            content: msg,
        })
        .into()
    }
}
impl From<ClientResponse> for ProtocolMessage {
    /// Wraps the response into a ClientMessage envelope with no padding.
    ///
    /// NOTE(review): the overlay is set to nil here — presumably the overlay
    /// field is irrelevant when routing responses; confirm that receivers
    /// never read it on the response path.
    fn from(msg: ClientResponse) -> ProtocolMessage {
        ProtocolMessage::ClientMessage(ClientMessage::V0(ClientMessageV0 {
            overlay: OverlayId::nil(),
            content: ClientMessageContentV0::ClientResponse(msg),
            padding: vec![],
        }))
    }
}
//

@ -49,6 +49,10 @@ pub enum NgError {
BranchNotFound,
StoreNotFound,
UserNotFound,
TopicNotFound,
NotConnected,
ProtocolError,
ActorError,
}
impl Error for NgError {}
@ -165,6 +169,9 @@ pub enum StorageError {
AlreadyExists,
DataCorruption,
UnknownColumnFamily,
PropertyNotFound,
NotAStoreRepo,
OverlayBranchNotFound,
}
impl core::fmt::Display for StorageError {

@ -82,6 +82,12 @@ impl Event {
Event::V0(v0) => v0.content.seq,
}
}
/// Topic on which this event was published.
pub fn topic_id(&self) -> &TopicId {
    match self {
        Event::V0(event) => &event.content.topic,
    }
}
}
impl EventV0 {
@ -120,7 +126,10 @@ impl EventV0 {
let store = Arc::clone(&repo.store);
let branch = repo.branch(branch_id)?;
let topic_id = &branch.topic;
let topic_priv_key = &branch.topic_priv_key;
let topic_priv_key = branch
.topic_priv_key
.as_ref()
.ok_or(NgError::PermissionDenied)?;
let publisher_pubkey = publisher.to_pub();
let key = Self::derive_key(&repo_id, branch_id, &branch.read_cap.key, &publisher_pubkey);
let commit_key = commit.key().unwrap();

@ -62,6 +62,16 @@ pub trait WriteTransaction: ReadTransaction {
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError>;
/// Delete all properties' values of a key from the store in case the property is a multi-values one
fn del_all_values(
&self,
prefix: u8,
key: &Vec<u8>,
property_size: usize,
suffix: Option<u8>,
family: &Option<String>,
) -> Result<(), StorageError>;
}
pub trait ReadTransaction {
@ -115,7 +125,7 @@ pub trait ReadTransaction {
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, StorageError>;
}
pub trait KCVStore: WriteTransaction {
pub trait KCVStorage: WriteTransaction {
fn write_transaction(
&self,
method: &mut dyn FnMut(&mut dyn WriteTransaction) -> Result<(), StorageError>,

@ -82,7 +82,7 @@ pub struct BranchInfo {
pub topic: TopicId,
pub topic_priv_key: BranchWriteCapSecret,
pub topic_priv_key: Option<BranchWriteCapSecret>,
pub read_cap: ReadCap,
}
@ -104,6 +104,21 @@ pub struct Repo {
pub branches: HashMap<BranchId, BranchInfo>,
/// if opened_branches is empty, it means the repo has not been opened yet.
/// if a branchId is present in the hashmap, it means it is opened.
/// the boolean indicates if the branch is opened as publisher or not
pub opened_branches: HashMap<BranchId, bool>,
/*pub main_branch_rc: Option<BranchId>,
pub chat_branch_rc: Option<BranchId>,
// only used if it is a StoreRepo
pub store_branch_rc: Option<BranchId>,
pub overlay_branch_rc: Option<BranchId>,
// only used if it is a private StoreRepo
pub user_branch_rc: Option<BranchId>,*/
pub store: Arc<Store>,
}
@ -163,6 +178,7 @@ impl Repo {
read_cap: None,
write_cap: None,
branches: HashMap::new(),
opened_branches: HashMap::new(),
}
}
@ -204,6 +220,17 @@ impl Repo {
}
}
/// Returns true if `branch` is currently opened (in any mode; the stored
/// bool distinguishes publisher mode, see `branch_is_opened_as_publisher`).
pub fn branch_is_opened(&self, branch: &BranchId) -> bool {
    self.opened_branches.contains_key(branch)
}
/// Returns true if `branch` is opened with publisher rights (the boolean
/// value stored in `opened_branches`); false when not opened at all.
pub fn branch_is_opened_as_publisher(&self, branch: &BranchId) -> bool {
    self.opened_branches.get(branch).copied().unwrap_or(false)
}
// pub(crate) fn get_store(&self) -> &Store {
// self.store.unwrap()
// }

@ -12,7 +12,7 @@
//! Store of a Site, or of a Group or Dialog
use core::fmt;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};
use crate::block_storage::BlockStorage;
@ -31,7 +31,7 @@ pub struct Store {
store_repo: StoreRepo,
store_readcap: ReadCap,
store_overlay_branch_readcap: ReadCap,
overlay_id: OverlayId,
pub overlay_id: OverlayId,
storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
}
@ -49,6 +49,9 @@ impl fmt::Debug for Store {
}
impl Store {
pub fn id(&self) -> &PubKey {
self.store_repo.repo_id()
}
pub fn set_read_caps(&mut self, read_cap: ReadCap, overlay_read_cap: Option<ReadCap>) {
self.store_readcap = read_cap;
if let Some(overlay_read_cap) = overlay_read_cap {
@ -386,7 +389,7 @@ impl Store {
id: repo_pub_key.clone(),
branch_type: BranchType::Root,
topic: topic_pub_key,
topic_priv_key: topic_priv_key,
topic_priv_key: Some(topic_priv_key),
read_cap: root_branch_readcap.clone(),
};
@ -394,7 +397,7 @@ impl Store {
id: main_branch_pub_key.clone(),
branch_type: BranchType::Main,
topic: main_branch_topic_pub_key,
topic_priv_key: main_branch_topic_priv_key,
topic_priv_key: Some(main_branch_topic_priv_key),
read_cap: branch_read_cap,
};
@ -410,6 +413,7 @@ impl Store {
(repo_pub_key, root_branch),
(main_branch_pub_key, main_branch),
]),
opened_branches: HashMap::new(),
};
Ok((repo, events))

@ -12,6 +12,7 @@
//! Corresponds to the BARE schema
use crate::errors::NgError;
use crate::store::Store;
use crate::utils::{
decode_key, dh_pubkey_array_from_ed_pubkey_slice, dh_pubkey_from_ed_pubkey_slice,
ed_privkey_to_ed_pubkey, from_ed_privkey_to_dh_privkey, random_key,
@ -160,7 +161,6 @@ impl PubKey {
}
}
#[deprecated(note = "**Don't use nil method**")]
pub fn nil() -> Self {
PubKey::Ed25519PubKey([0u8; 32])
}
@ -362,6 +362,12 @@ pub type RepoId = PubKey;
/// RepoHash is the BLAKE3 Digest over the RepoId
pub type RepoHash = Digest;
impl From<RepoId> for RepoHash {
    /// RepoHash is the BLAKE3 digest over the raw bytes of the RepoId.
    fn from(id: RepoId) -> Self {
        Digest::Blake3Digest32(*blake3::hash(id.slice()).as_bytes())
    }
}
// impl From<RepoHash> for String {
// fn from(id: RepoHash) -> Self {
// hex::encode(to_vec(&id).unwrap())
@ -519,19 +525,22 @@ impl fmt::Display for OverlayId {
}
impl OverlayId {
pub fn inner_from_store(store: &Store) -> OverlayId {
Self::inner(store.id(), store.get_store_overlay_branch_readcap_secret())
}
pub fn inner(
store_id: &PubKey,
store_overlay_branch_readcap_secret: ReadCapSecret,
store_overlay_branch_readcap_secret: &ReadCapSecret,
) -> OverlayId {
let store_id = serde_bare::to_vec(store_id).unwrap();
let mut store_overlay_branch_readcap_secret =
serde_bare::to_vec(&store_overlay_branch_readcap_secret).unwrap();
let mut store_overlay_branch_readcap_secret_ser =
serde_bare::to_vec(store_overlay_branch_readcap_secret).unwrap();
let mut key: [u8; 32] = blake3::derive_key(
"NextGraph Overlay ReadCapSecret BLAKE3 key",
store_overlay_branch_readcap_secret.as_slice(),
store_overlay_branch_readcap_secret_ser.as_slice(),
);
let key_hash = blake3::keyed_hash(&key, &store_id);
store_overlay_branch_readcap_secret.zeroize();
store_overlay_branch_readcap_secret_ser.zeroize();
key.zeroize();
OverlayId::Inner(Digest::from_slice(*key_hash.as_bytes()))
}
@ -544,7 +553,6 @@ impl OverlayId {
pub fn dummy() -> OverlayId {
OverlayId::Outer(Digest::dummy())
}
#[deprecated(note = "**Don't use nil method**")]
pub fn nil() -> OverlayId {
OverlayId::Outer(Digest::nil())
}
@ -609,7 +617,7 @@ impl StoreOverlay {
| StoreOverlay::V0(StoreOverlayV0::ProtectedStore(id))
| StoreOverlay::V0(StoreOverlayV0::PrivateStore(id))
| StoreOverlay::V0(StoreOverlayV0::Group(id)) => {
OverlayId::inner(id, store_overlay_branch_readcap_secret)
OverlayId::inner(id, &store_overlay_branch_readcap_secret)
}
StoreOverlay::V0(StoreOverlayV0::Dialog(d)) => unimplemented!(),
StoreOverlay::Own(_) => unimplemented!(),
@ -681,6 +689,13 @@ impl StoreRepo {
OverlayId::outer(self.repo_id())
}
/// Returns true only for the private-store variant.
pub fn is_private(&self) -> bool {
    matches!(self, Self::V0(StoreRepoV0::PrivateStore(_)))
}
// pub fn overlay_id_for_storage_purpose(
// &self,
// store_overlay_branch_readcap_secret: Option<ReadCapSecret>,
@ -1220,7 +1235,7 @@ pub enum BranchType {
Store,
Overlay,
User,
Transactional, // this could have been called OtherTransaction, but for the sake of simplicity, we use Transactional for any branch that is not the Main one.
Transactional, // this could have been called OtherTransactional, but for the sake of simplicity, we use Transactional for any branch that is not the Main one.
Root, // only used for BranchInfo
}

@ -32,7 +32,7 @@ use serde::{Deserialize, Serialize};
use serde_bare::error::Error;
pub struct LmdbTransaction<'a> {
store: &'a LmdbKCVStore,
store: &'a LmdbKCVStorage,
writer: Option<Writer<LmdbRwTransaction<'a>>>,
}
@ -54,7 +54,7 @@ impl<'a> ReadTransaction for LmdbTransaction<'a> {
}
/// Load a single value property from the store.
fn get(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<Vec<u8>, StorageError> {
let property = LmdbKCVStore::compute_property(prefix, key, suffix);
let property = LmdbKCVStorage::compute_property(prefix, key, suffix);
let mut iter = self
.store
@ -75,7 +75,7 @@ impl<'a> ReadTransaction for LmdbTransaction<'a> {
key: &Vec<u8>,
suffix: Option<u8>,
) -> Result<Vec<Vec<u8>>, StorageError> {
let property = LmdbKCVStore::compute_property(prefix, key, suffix);
let property = LmdbKCVStorage::compute_property(prefix, key, suffix);
let mut iter = self
.store
@ -103,7 +103,7 @@ impl<'a> ReadTransaction for LmdbTransaction<'a> {
suffix: Option<u8>,
value: &Vec<u8>,
) -> Result<(), StorageError> {
let property = LmdbKCVStore::compute_property(prefix, key, suffix);
let property = LmdbKCVStorage::compute_property(prefix, key, suffix);
let exists = self
.store
@ -131,7 +131,7 @@ impl<'a> WriteTransaction for LmdbTransaction<'a> {
suffix: Option<u8>,
value: &Vec<u8>,
) -> Result<(), StorageError> {
let property = LmdbKCVStore::compute_property(prefix, key, suffix);
let property = LmdbKCVStorage::compute_property(prefix, key, suffix);
self.store
.main_store
.put(
@ -152,7 +152,7 @@ impl<'a> WriteTransaction for LmdbTransaction<'a> {
suffix: Option<u8>,
value: &Vec<u8>,
) -> Result<(), StorageError> {
let property = LmdbKCVStore::compute_property(prefix, key, suffix);
let property = LmdbKCVStorage::compute_property(prefix, key, suffix);
self.store
.main_store
@ -173,7 +173,7 @@ impl<'a> WriteTransaction for LmdbTransaction<'a> {
/// Delete a property from the store.
fn del(&mut self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError> {
let property = LmdbKCVStore::compute_property(prefix, key, suffix);
let property = LmdbKCVStorage::compute_property(prefix, key, suffix);
let res = self
.store
.main_store
@ -195,7 +195,7 @@ impl<'a> WriteTransaction for LmdbTransaction<'a> {
suffix: Option<u8>,
value: &Vec<u8>,
) -> Result<(), StorageError> {
let property = LmdbKCVStore::compute_property(prefix, key, suffix);
let property = LmdbKCVStorage::compute_property(prefix, key, suffix);
self.store
.main_store
.delete(
@ -226,7 +226,7 @@ impl<'a> WriteTransaction for LmdbTransaction<'a> {
}
#[derive(Debug)]
pub struct LmdbKCVStore {
pub struct LmdbKCVStorage {
/// the main store where all the properties of keys are stored
main_store: MultiStore<LmdbDatabase>,
/// the opened environment so we can create new transactions
@ -249,7 +249,7 @@ fn compare<T: Ord>(a: &[T], b: &[T]) -> std::cmp::Ordering {
return a.len().cmp(&b.len());
}
impl ReadTransaction for LmdbKCVStore {
impl ReadTransaction for LmdbKCVStorage {
fn get_all_keys_and_values(
&self,
prefix: u8,
@ -369,7 +369,7 @@ impl ReadTransaction for LmdbKCVStore {
}
}
impl KCVStore for LmdbKCVStore {
impl KCVStorage for LmdbKCVStorage {
fn write_transaction(
&self,
method: &mut dyn FnMut(&mut dyn WriteTransaction) -> Result<(), StorageError>,
@ -439,7 +439,7 @@ impl KCVStore for LmdbKCVStore {
}
}
impl LmdbKCVStore {
impl LmdbKCVStorage {
pub fn path(&self) -> PathBuf {
PathBuf::from(&self.path)
}
@ -454,9 +454,9 @@ impl LmdbKCVStore {
new
}
/// Opens the store and returns a KCVStore object that should be kept and used to manipulate the properties
/// Opens the store and returns a KCVStorage object that should be kept and used to manipulate the properties
/// The key is the encryption key for the data at rest.
pub fn open<'a>(path: &Path, key: [u8; 32]) -> Result<LmdbKCVStore, StorageError> {
pub fn open<'a>(path: &Path, key: [u8; 32]) -> Result<LmdbKCVStorage, StorageError> {
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
let mut builder = Lmdb::new();
@ -487,7 +487,7 @@ impl LmdbKCVStore {
StorageError::BackendError
})?;
Ok(LmdbKCVStore {
Ok(LmdbKCVStorage {
environment: shared_rkv.clone(),
main_store,
path: path.to_str().unwrap().to_string(),

@ -32,7 +32,7 @@ pub struct RocksDbBlockStorage {
}
impl RocksDbBlockStorage {
/// Opens the store and returns a KCVStore object that should be kept and used to manipulate the properties
/// Opens the store and returns a KCVStorage object that should be kept and used to manipulate the properties
/// The key is the encryption key for the data at rest.
pub fn open<'a>(path: &Path, key: [u8; 32]) -> Result<RocksDbBlockStorage, StorageError> {
let mut opts = Options::default();
@ -80,22 +80,20 @@ impl BlockStorage for RocksDbBlockStorage {
fn put(&self, overlay: &OverlayId, block: &Block) -> Result<BlockId, StorageError> {
// TODO? return an error if already present in blockstorage?
let block_id = block.id();
let block_id_ser = serde_bare::to_vec(&block_id).unwrap();
let ser = serde_bare::to_vec(block)?;
let tx = self.db.transaction();
tx.put(Self::compute_key(overlay, &block_id), &ser)
.map_err(|_e| StorageError::BackendError)?;
tx.commit();
tx.commit().map_err(|_| StorageError::BackendError)?;
Ok(block_id)
}
/// Delete a block from the storage.
fn del(&self, overlay: &OverlayId, id: &BlockId) -> Result<usize, StorageError> {
let block_id_ser = serde_bare::to_vec(id).unwrap();
let tx = self.db.transaction();
tx.delete(Self::compute_key(overlay, id))
.map_err(|_e| StorageError::BackendError)?;
tx.commit();
tx.commit().map_err(|_| StorageError::BackendError)?;
// TODO, return real size
Ok(0)
}

@ -23,7 +23,7 @@ use rocksdb::{
};
pub struct RocksdbTransaction<'a> {
store: &'a RocksdbKCVStore,
store: &'a RocksdbKCVStorage,
tx: Option<rocksdb::Transaction<'a, TransactionDB>>,
}
@ -64,7 +64,7 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> {
family: &Option<String>,
) -> Result<Vec<(Vec<u8>, Vec<u8>)>, StorageError> {
let property_start =
RocksdbKCVStore::calc_key_start(prefix, key_size, &key_prefix, &suffix);
RocksdbKCVStorage::calc_key_start(prefix, key_size, &key_prefix, &suffix);
let iter = self.get_iterator(&property_start, &family)?;
self.store
.get_all_keys_and_values_(prefix, key_size, key_prefix, suffix, iter)
@ -79,7 +79,7 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> {
) -> Result<HashMap<u8, Vec<u8>>, StorageError> {
let key_size = key.len();
let prop_values = self.get_all_keys_and_values(prefix, key_size, key, None, family)?;
Ok(RocksdbKCVStore::get_all_properties_of_key(
Ok(RocksdbKCVStorage::get_all_properties_of_key(
prop_values,
key_size,
&properties,
@ -94,7 +94,7 @@ impl<'a> ReadTransaction for RocksdbTransaction<'a> {
suffix: Option<u8>,
family: &Option<String>,
) -> Result<Vec<u8>, StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, &suffix);
let property = RocksdbKCVStorage::compute_property(prefix, key, &suffix);
let res = match family {
Some(cf) => self.tx().get_for_update_cf(
self.store
@ -152,7 +152,7 @@ impl<'a> WriteTransaction for RocksdbTransaction<'a> {
value: &Vec<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, &suffix);
let property = RocksdbKCVStorage::compute_property(prefix, key, &suffix);
match family {
Some(cf) => self.tx().put_cf(
self.store
@ -189,7 +189,7 @@ impl<'a> WriteTransaction for RocksdbTransaction<'a> {
suffix: Option<u8>,
family: &Option<String>,
) -> Result<(), StorageError> {
let property = RocksdbKCVStore::compute_property(prefix, key, &suffix);
let property = RocksdbKCVStorage::compute_property(prefix, key, &suffix);
let res = match family {
Some(cf) => self.tx().delete_cf(
self.store
@ -243,9 +243,63 @@ impl<'a> WriteTransaction for RocksdbTransaction<'a> {
}
Ok(())
}
/// Deletes all the values of a multi-valued property.
///
/// The property is identified by `prefix` + `key`; each stored value appends
/// `property_size` bytes after the key, so the scan covers the whole range
/// from `key` padded with 0x00 up to `key` padded with 0xFF. When `suffix`
/// is given, only entries carrying that suffix byte are deleted.
fn del_all_values(
    &self,
    prefix: u8,
    key: &Vec<u8>,
    property_size: usize,
    suffix: Option<u8>,
    family: &Option<String>,
) -> Result<(), StorageError> {
    // Full stored-key length: logical key plus the value bytes.
    let key_size = key.len() + property_size;
    // Lower bound of the scan (key padded with trailing zeros).
    let property_start = RocksdbKCVStorage::calc_key_start(prefix, key_size, &key, &suffix);
    let mut iter = self.get_iterator(&property_start, &family)?;

    // Upper bound of the scan: key padded with 0xFF bytes; suffix defaults
    // to 0xFF so that, without a filter, every suffix falls inside the range.
    let mut vec_key_end = key.clone();
    let mut trailing_max = vec![255u8; property_size];
    vec_key_end.append(&mut trailing_max);

    // let property_start = Self::compute_property(prefix, &vec_key_start, suffix);

    let property_end = RocksdbKCVStorage::compute_property(
        prefix,
        &vec_key_end,
        &Some(suffix.unwrap_or(255u8)),
    );

    loop {
        let res = iter.next();
        match res {
            Some(Ok(val)) => {
                match compare(&val.0, property_end.as_slice()) {
                    std::cmp::Ordering::Less | std::cmp::Ordering::Equal => {
                        if suffix.is_some() {
                            // Stored layout: [prefix(1)][key(key_size)][suffix(1)].
                            // Skip entries too short to carry a suffix, or
                            // carrying a different one.
                            if val.0.len() < (key_size + 2)
                                || val.0[1 + key_size] != suffix.unwrap()
                            {
                                continue;
                            }
                            // } else if val.0.len() > (key_size + 1) {
                            //     continue;
                        }
                        self.tx()
                            .delete(val.0)
                            .map_err(|_| StorageError::BackendError)?;
                    }
                    // NOTE(review): entries past property_end are skipped but
                    // the loop keeps iterating until the iterator is
                    // exhausted — a `break` here would stop earlier; confirm
                    // the iterator ordering before changing this.
                    _ => {} //,
                }
            }
            Some(Err(_e)) => return Err(StorageError::BackendError),
            None => {
                break;
            }
        }
    }
    Ok(())
}
}
pub struct RocksdbKCVStore {
pub struct RocksdbKCVStorage {
/// the main store where all the properties of keys are stored
db: TransactionDB,
/// path for the storage backend data
@ -266,7 +320,7 @@ fn compare<T: Ord>(a: &[T], b: &[T]) -> std::cmp::Ordering {
return a.len().cmp(&b.len());
}
impl ReadTransaction for RocksdbKCVStore {
impl ReadTransaction for RocksdbKCVStorage {
/// returns a list of (key,value) that are in the range specified in the request
fn get_all_keys_and_values(
&self,
@ -353,7 +407,7 @@ impl ReadTransaction for RocksdbKCVStore {
}
}
impl KCVStore for RocksdbKCVStore {
impl KCVStorage for RocksdbKCVStorage {
fn write_transaction(
&self,
method: &mut dyn FnMut(&mut dyn WriteTransaction) -> Result<(), StorageError>,
@ -373,7 +427,7 @@ impl KCVStore for RocksdbKCVStore {
}
}
impl WriteTransaction for RocksdbKCVStore {
impl WriteTransaction for RocksdbKCVStorage {
/// Save a property value to the store.
fn put(
&self,
@ -439,9 +493,22 @@ impl WriteTransaction for RocksdbKCVStore {
Ok(())
})
}
/// Deletes all the values of a multi-valued property, by running the
/// transactional `del_all_values` implementation inside a single write
/// transaction.
fn del_all_values(
    &self,
    prefix: u8,
    key: &Vec<u8>,
    property_size: usize,
    suffix: Option<u8>,
    family: &Option<String>,
) -> Result<(), StorageError> {
    self.write_transaction(&mut |tx| {
        tx.del_all_values(prefix, key, property_size, suffix, family)
    })
}
}
impl RocksdbKCVStore {
impl RocksdbKCVStorage {
pub fn path(&self) -> PathBuf {
PathBuf::from(&self.path)
}
@ -537,9 +604,9 @@ impl RocksdbKCVStore {
let mut trailing_zeros = vec![0u8; key_size - key_prefix.len()];
vec_key_start.append(&mut trailing_zeros);
let mut vec_key_end = key_prefix.clone();
let mut trailing_max = vec![255u8; key_size - key_prefix.len()];
vec_key_end.append(&mut trailing_max);
// let mut vec_key_end = key_prefix.clone();
// let mut trailing_max = vec![255u8; key_size - key_prefix.len()];
// vec_key_end.append(&mut trailing_max);
Self::compute_property(prefix, &vec_key_start, suffix)
}
@ -572,9 +639,9 @@ impl RocksdbKCVStore {
new
}
/// Opens the store and returns a KCVStore object that should be kept and used to manipulate the properties
/// Opens the store and returns a KCVStorage object that should be kept and used to manipulate the properties
/// The key is the encryption key for the data at rest.
pub fn open<'a>(path: &Path, key: [u8; 32]) -> Result<RocksdbKCVStore, StorageError> {
pub fn open<'a>(path: &Path, key: [u8; 32]) -> Result<RocksdbKCVStorage, StorageError> {
let mut opts = Options::default();
opts.set_use_fsync(true);
opts.create_if_missing(true);
@ -590,7 +657,7 @@ impl RocksdbKCVStore {
Env::version()
);
Ok(RocksdbKCVStore {
Ok(RocksdbKCVStorage {
db: db,
path: path.to_str().unwrap().to_string(),
})

@ -33,6 +33,7 @@ async-std = { version = "1.12.0", features = [ "attributes", "unstable" ] }
threshold_crypto = "0.4.0"
rand = { version = "0.7", features = ["getrandom"] }
web-time = "0.2.0"
either = "1.8.1"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ng-storage-rocksdb = { path = "../ng-storage-rocksdb", version = "0.1.0" }

@ -10,10 +10,16 @@
//! RocksDb Backend for UserStorage trait
use crate::types::*;
use crate::user_storage::repo::RepoStorage;
use crate::user_storage::*;
use either::Either::{Left, Right};
use ng_repo::block_storage::BlockStorage;
use ng_repo::repo::Repo;
use ng_repo::store::Store;
use ng_repo::{errors::StorageError, types::*};
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStore;
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStorage;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use std::{
cmp::{max, min},
collections::HashMap,
@ -21,13 +27,13 @@ use std::{
};
pub(crate) struct RocksDbUserStorage {
user_storage: RocksdbKCVStore,
user_storage: RocksdbKCVStorage,
}
impl RocksDbUserStorage {
pub fn open(path: &PathBuf, master_key: [u8; 32]) -> Result<Self, StorageError> {
Ok(RocksDbUserStorage {
user_storage: RocksdbKCVStore::open(path, master_key)?,
user_storage: RocksdbKCVStorage::open(path, master_key)?,
})
}
}
@ -36,4 +42,29 @@ impl UserStorage for RocksDbUserStorage {
fn repo_id_to_store_overlay(&self, id: &RepoId) -> Result<StoreOverlay, StorageError> {
unimplemented!();
}
/// Delegates to `RepoStorage` over the underlying encrypted RocksDB KCV storage.
fn get_all_store_and_repo_ids(&self) -> Result<HashMap<StoreRepo, Vec<RepoId>>, StorageError> {
RepoStorage::get_all_store_and_repo_ids(&self.user_storage)
}
/// Loads a store's root repo. Passing `Right(block_storage)` tells
/// `RepoStorage::load` to rebuild the `Store` itself from the persisted
/// STORE_REPO property instead of receiving an already-built one.
fn load_store(
&self,
repo_store: &StoreRepo,
block_storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
) -> Result<Repo, StorageError> {
RepoStorage::load(
repo_store.repo_id(),
Right(block_storage),
&self.user_storage,
)
}
/// Loads a member repo on top of an already constructed `Store`.
fn load_repo(&self, repo_id: &RepoId, store: Arc<Store>) -> Result<Repo, StorageError> {
RepoStorage::load(repo_id, Left(store), &self.user_storage)
}
/// Persists the repo and all of its branches.
// NOTE(review): RepoStorage::create_from_repo fails with AlreadyExists for a
// repo that was saved before — confirm whether save_repo is meant to upsert.
fn save_repo(&self, repo: &Repo) -> Result<(), StorageError> {
RepoStorage::create_from_repo(repo, &self.user_storage)?;
Ok(())
}
}

@ -65,7 +65,7 @@ impl SiteV0 {
}
}
fn create_individual_(
async fn create_individual_(
user_priv_key: PrivKey,
verifier: &mut Verifier,
site_name: SiteName,
@ -99,14 +99,17 @@ impl SiteV0 {
verifier.reserve_more(18)?;
let public_repo =
verifier.new_store_default(&site_pubkey, &user_priv_key, &public_store, false)?;
let public_repo = verifier
.new_store_default(&site_pubkey, &user_priv_key, &public_store, false)
.await?;
let protected_repo =
verifier.new_store_default(&site_pubkey, &user_priv_key, &protected_store, false)?;
let protected_repo = verifier
.new_store_default(&site_pubkey, &user_priv_key, &protected_store, false)
.await?;
let private_repo =
verifier.new_store_default(&site_pubkey, &user_priv_key, &private_store, true)?;
let private_repo = verifier
.new_store_default(&site_pubkey, &user_priv_key, &private_store, true)
.await?;
// TODO: create user branch
// TODO: add the 2 commits in user branch about StoreUpdate of public and protected stores.
@ -126,22 +129,22 @@ impl SiteV0 {
})
}
pub fn create_individual(
pub async fn create_individual(
name: String,
user_priv_key: PrivKey,
verifier: &mut Verifier,
) -> Result<Self, NgError> {
Self::create_individual_(user_priv_key, verifier, SiteName::Name(name))
Self::create_individual_(user_priv_key, verifier, SiteName::Name(name)).await
}
pub fn create_personal(
pub async fn create_personal(
user_priv_key: PrivKey,
verifier: &mut Verifier,
) -> Result<Self, NgError> {
Self::create_individual_(user_priv_key, verifier, SiteName::Personal)
Self::create_individual_(user_priv_key, verifier, SiteName::Personal).await
}
pub fn create_org(name: String) -> Result<Self, NgError> {
pub async fn create_org(name: String) -> Result<Self, NgError> {
let (site_privkey, site_pubkey) = generate_keypair();
let (public_store_privkey, public_store_pubkey) = generate_keypair();

@ -72,6 +72,12 @@ impl VerifierType {
_ => false,
}
}
/// True when this verifier type keeps its state on disk (RocksDb backend).
pub fn is_persistent(&self) -> bool {
    matches!(self, Self::RocksDb)
}
}
//type LastSeqFn = fn(peer_id: PubKey, qty: u16) -> Result<u64, NgError>;
@ -118,6 +124,13 @@ impl VerifierConfigType {
_ => false,
}
}
/// True when this configuration uses on-disk (RocksDb) persistence.
pub(crate) fn is_persistent(&self) -> bool {
    matches!(self, Self::RocksDb(_))
}
}
#[derive(Debug)]

@ -0,0 +1,173 @@
// Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Branch storage on disk
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::Hash;
use std::hash::Hasher;
use std::time::SystemTime;
use ng_net::errors::ProtocolError;
use ng_net::types::*;
use ng_repo::block_storage::BlockStorage;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::repo::BranchInfo;
use ng_repo::repo::Repo;
use ng_repo::store::Store;
use ng_repo::types::BranchId;
use ng_repo::types::BranchType;
use ng_repo::types::BranchWriteCapSecret;
use ng_repo::types::ObjectId;
use ng_repo::types::ReadCap;
use ng_repo::types::RepoId;
use ng_repo::types::SymKey;
use ng_repo::types::Timestamp;
use ng_repo::types::TopicId;
use serde_bare::to_vec;
use super::prop;
/// Accessor for the persistent storage of one branch: a set of key/column/value
/// rows under the branch-id key, manipulated through a `KCVStorage` backend.
pub struct BranchStorage<'a> {
storage: &'a dyn KCVStorage,
id: BranchId,
}
impl<'a> BranchStorage<'a> {
// single-byte key prefix shared by all branch property rows
const PREFIX: u8 = b'c';
// branch properties suffixes
const TYPE: u8 = b'b';
const PUBLISHER: u8 = b'p';
const READ_CAP: u8 = b'r';
const TOPIC: u8 = b't';
const ALL_PROPERTIES: [u8; 4] = [Self::TYPE, Self::PUBLISHER, Self::READ_CAP, Self::TOPIC];
// separate prefix under which the branch's head object ids are stored
const PREFIX_HEADS: u8 = b'h';
// TYPE is written by every create(), so its presence marks an existing branch
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::TYPE;
/// Opens the storage entry of an existing branch.
///
/// Returns `StorageError::NotFound` when no row exists yet for `id`.
pub fn open(
id: &BranchId,
storage: &'a dyn KCVStorage,
) -> Result<BranchStorage<'a>, StorageError> {
let opening = BranchStorage {
id: id.clone(),
storage,
};
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
/// Persists a new branch from an in-memory `BranchInfo`.
pub fn create_from_info(
info: &BranchInfo,
storage: &'a dyn KCVStorage,
) -> Result<BranchStorage<'a>, StorageError> {
Self::create(
&info.id,
&info.read_cap,
&info.branch_type,
&info.topic,
info.topic_priv_key.as_ref(),
storage,
)
}
/// Writes all branch properties in one transaction.
///
/// `publisher` (the topic private key) is only stored when we have publishing
/// rights on the branch. Returns `StorageError::AlreadyExists` if a row for
/// `id` is already present.
// NOTE(review): the exists() check and the write transaction are two separate
// storage operations; concurrent creators could race — confirm the KCVStorage
// backend serializes writers.
pub fn create(
id: &BranchId,
read_cap: &ReadCap,
branch_type: &BranchType,
topic: &TopicId,
publisher: Option<&BranchWriteCapSecret>,
storage: &'a dyn KCVStorage,
) -> Result<BranchStorage<'a>, StorageError> {
let bs = BranchStorage {
id: id.clone(),
storage,
};
if bs.exists() {
return Err(StorageError::AlreadyExists);
}
storage.write_transaction(&mut |tx| {
let id_ser = to_vec(&id)?;
let value = to_vec(read_cap)?;
tx.put(Self::PREFIX, &id_ser, Some(Self::READ_CAP), &value, &None)?;
let value = to_vec(branch_type)?;
tx.put(Self::PREFIX, &id_ser, Some(Self::TYPE), &value, &None)?;
let value = to_vec(topic)?;
tx.put(Self::PREFIX, &id_ser, Some(Self::TOPIC), &value, &None)?;
if let Some(privkey) = publisher {
let value = to_vec(privkey)?;
tx.put(Self::PREFIX, &id_ser, Some(Self::PUBLISHER), &value, &None)?;
}
Ok(())
})?;
Ok(bs)
}
/// Reads all stored properties of branch `id` back into a `BranchInfo`.
///
/// `topic_priv_key` is optional: `.ok()` turns a missing PUBLISHER property
/// (non-publisher branches) into `None` instead of an error.
pub fn load(id: &BranchId, storage: &'a dyn KCVStorage) -> Result<BranchInfo, StorageError> {
let props = storage.get_all_properties_of_key(
Self::PREFIX,
to_vec(id).unwrap(),
Self::ALL_PROPERTIES.to_vec(),
&None,
)?;
let bs = BranchInfo {
id: id.clone(),
branch_type: prop(Self::TYPE, &props)?,
read_cap: prop(Self::READ_CAP, &props)?,
topic: prop(Self::TOPIC, &props)?,
topic_priv_key: prop(Self::PUBLISHER, &props).ok(),
};
Ok(bs)
}
/// True when the TYPE property is present for this branch id.
pub fn exists(&self) -> bool {
self.storage
.get(
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
&None,
)
.is_ok()
}
// NOTE(review): declared as `&RepoId` while the field is a `BranchId` —
// presumably the same underlying key type; consider declaring `&BranchId`.
pub fn id(&self) -> &RepoId {
&self.id
}
/// Deletes all branch properties and every stored head id, in one transaction.
pub fn del(&self) -> Result<(), StorageError> {
self.storage.write_transaction(&mut |tx| {
let key = &to_vec(&self.id)?;
tx.del_all(Self::PREFIX, key, &Self::ALL_PROPERTIES, &None)?;
// heads are fixed-size ObjectId values stored under PREFIX_HEADS
let size = to_vec(&ObjectId::nil())?.len();
tx.del_all_values(Self::PREFIX_HEADS, key, size, None, &None)?;
Ok(())
})
}
}
#[cfg(test)]
mod test {
    // The previous imports (StorageError, ng_repo::types, ng_repo::utils,
    // std::fs) were all unused; re-introduce them alongside real tests.
    /// Placeholder — renamed from the copy-pasted `test_repo` since this file
    /// covers BranchStorage, not RepoStorage.
    #[test]
    pub fn test_branch() {
        // TODO: round-trip coverage: create / open / load / exists / del.
    }
}

@ -0,0 +1,29 @@
// Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
pub mod storage;
pub use storage::*;
pub mod repo;
pub mod branch;
use ng_repo::errors::StorageError;
use serde::Deserialize;
use serde_bare::from_slice;
use std::collections::HashMap;
/// Fetches the property stored under suffix byte `prop` from a property map
/// and BARE-deserializes it into the requested type.
/// Returns `StorageError::PropertyNotFound` when the suffix is absent.
pub(crate) fn prop<A>(prop: u8, props: &HashMap<u8, Vec<u8>>) -> Result<A, StorageError>
where
    A: for<'a> Deserialize<'a>,
{
    let bytes = props.get(&prop).ok_or(StorageError::PropertyNotFound)?;
    let value = from_slice(bytes)?;
    Ok(value)
}

@ -0,0 +1,352 @@
// Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Repo storage on disk
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::collections::HashSet;
use std::hash::Hash;
use std::hash::Hasher;
use std::time::SystemTime;
use either::{Either, Left, Right};
use ng_net::errors::ProtocolError;
use ng_net::types::*;
use ng_repo::block_storage::BlockStorage;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::repo::BranchInfo;
use ng_repo::repo::Repo;
use ng_repo::store::Store;
use ng_repo::types::BranchId;
use ng_repo::types::BranchType;
use ng_repo::types::ReadCap;
use ng_repo::types::RepoId;
use ng_repo::types::RepoWriteCapSecret;
use ng_repo::types::Repository;
use ng_repo::types::StoreRepo;
use ng_repo::types::SymKey;
use ng_repo::types::Timestamp;
use ng_repo::utils::now_timestamp;
use serde::Deserialize;
use serde_bare::from_slice;
use serde_bare::to_vec;
use std::sync::{Arc, RwLock};
use super::branch::BranchStorage;
use super::prop;
/// Accessor for the persistent storage of one repo: a set of key/column/value
/// rows under the repo-id key, manipulated through a `KCVStorage` backend.
pub struct RepoStorage<'a> {
storage: &'a dyn KCVStorage,
id: RepoId,
}
impl<'a> RepoStorage<'a> {
// single-byte key prefix shared by all repo property rows
const PREFIX: u8 = b'r';
// repo properties suffixes
const SIGNER_CAP_OWNER: u8 = b'a';
const SIGNER_CAP_PARTIAL: u8 = b'b';
const CHAT_BRANCH: u8 = b'c';
const DEFINITION: u8 = b'd';
const STORE_BRANCH: u8 = b'e';
const INHERIT: u8 = b'i';
const OVERLAY_BRANCH: u8 = b'l';
const MAIN_BRANCH: u8 = b'm';
const OWNERS: u8 = b'o';
const PINNED: u8 = b'p';
const QUORUM: u8 = b'q';
const READ_CAP: u8 = b'r';
const STORE_REPO: u8 = b's';
const SIGNER_CAP_TOTAL: u8 = b't';
const USER_BRANCH: u8 = b'u';
const WRITE_CAP_SECRET: u8 = b'w';
const ALL_PROPERTIES: [u8; 16] = [
Self::SIGNER_CAP_OWNER,
Self::SIGNER_CAP_PARTIAL,
Self::CHAT_BRANCH,
Self::DEFINITION,
Self::STORE_BRANCH,
Self::INHERIT,
Self::OVERLAY_BRANCH,
Self::MAIN_BRANCH,
Self::OWNERS,
Self::PINNED,
Self::QUORUM,
Self::READ_CAP,
Self::STORE_REPO,
Self::SIGNER_CAP_TOTAL,
Self::USER_BRANCH,
Self::WRITE_CAP_SECRET,
];
// separate prefix under which the repo's branch ids are stored
const PREFIX_BRANCHES: u8 = b'b';
// READ_CAP is written by every create(), so its presence marks an existing repo
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::READ_CAP;
/// Opens the storage entry of an existing repo.
///
/// Returns `StorageError::NotFound` when no row exists yet for `id`.
pub fn open(id: &RepoId, storage: &'a dyn KCVStorage) -> Result<RepoStorage<'a>, StorageError> {
let opening = RepoStorage {
id: id.clone(),
storage,
};
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
/// Persists a new repo (and all its branches) from an in-memory `Repo`.
// NOTE(review): unwrap() panics if the repo has no read_cap — presumably
// guaranteed by callers; confirm.
pub fn create_from_repo(
repo: &Repo,
storage: &'a dyn KCVStorage,
) -> Result<RepoStorage<'a>, StorageError> {
Self::create(
&repo.id,
repo.read_cap.as_ref().unwrap(),
repo.write_cap.as_ref(),
repo.store.get_store_repo(),
&repo.repo_def,
&repo.branches,
storage,
)
}
// TODO: signers
/// Writes the core repo properties (READ_CAP, STORE_REPO, DEFINITION, and the
/// optional WRITE_CAP_SECRET) in one transaction, then persists every branch.
/// Returns `StorageError::AlreadyExists` if a row for `id` is already present.
// NOTE(review): the exists() check and the write transaction are two separate
// storage operations; concurrent creators could race — confirm the KCVStorage
// backend serializes writers. Branch creation happens outside the repo
// transaction, so a failure mid-way leaves a partially persisted repo.
pub fn create(
id: &RepoId,
read_cap: &ReadCap,
write_cap: Option<&RepoWriteCapSecret>,
store_repo: &StoreRepo,
repo_def: &Repository,
branches: &HashMap<BranchId, BranchInfo>,
storage: &'a dyn KCVStorage,
) -> Result<RepoStorage<'a>, StorageError> {
let repo = RepoStorage {
id: id.clone(),
storage,
};
if repo.exists() {
return Err(StorageError::AlreadyExists);
}
storage.write_transaction(&mut |tx| {
let id_ser = to_vec(&id)?;
let value = to_vec(read_cap)?;
tx.put(Self::PREFIX, &id_ser, Some(Self::READ_CAP), &value, &None)?;
let value = to_vec(store_repo)?;
tx.put(Self::PREFIX, &id_ser, Some(Self::STORE_REPO), &value, &None)?;
let value = to_vec(repo_def)?;
tx.put(Self::PREFIX, &id_ser, Some(Self::DEFINITION), &value, &None)?;
if let Some(wc) = write_cap {
let value = to_vec(wc)?;
tx.put(
Self::PREFIX,
&id_ser,
Some(Self::WRITE_CAP_SECRET),
&value,
&None,
)?;
}
Ok(())
})?;
for branch in branches.values() {
BranchStorage::create_from_info(branch, storage)?;
}
Ok(repo)
}
/// Rebuilds a full in-memory `Repo` from storage.
///
/// `store` is either `Left(store)` — the repo belongs to an already-loaded
/// `Store` — or `Right(block_storage)`, meaning the repo IS a store root and
/// its `Store` must be reconstructed here from the persisted STORE_REPO
/// property.
pub fn load(
id: &RepoId,
store: Either<Arc<Store>, Arc<RwLock<dyn BlockStorage + Send + Sync>>>,
storage: &'a dyn KCVStorage,
) -> Result<Repo, StorageError> {
let branch_ids = Self::get_all_branches(id, storage)?;
let mut branches = HashMap::new();
let mut overlay_branch_read_cap = None;
for branch in branch_ids {
let info = BranchStorage::load(&branch, storage)?;
if info.branch_type == BranchType::Overlay {
overlay_branch_read_cap = Some(info.read_cap.clone());
}
let _ = branches.insert(branch, info);
}
let props = storage.get_all_properties_of_key(
Self::PREFIX,
to_vec(id).unwrap(),
Self::ALL_PROPERTIES.to_vec(),
&None,
)?;
let store = match store {
Left(s) => s,
Right(bs) => {
// we want to load a store. let's start by retrieving the store repo
// TODO: check that it has a STORE_BRANCH
let store_repo: StoreRepo =
prop(Self::STORE_REPO, &props).map_err(|_| StorageError::NotAStoreRepo)?;
// NOTE(review): the branch keyed by the repo id itself — presumably the
// store/root branch; confirm that convention.
let store_info = branches.get(id).ok_or(StorageError::NotFound)?;
// private stores have no separate overlay branch: the store branch's
// read cap doubles as the overlay read cap
let overlay_branch_read_cap = if store_repo.is_private() {
store_info.read_cap.clone()
} else {
overlay_branch_read_cap.ok_or(StorageError::OverlayBranchNotFound)?
};
Arc::new(Store::new(
store_repo,
store_info.read_cap.clone(),
overlay_branch_read_cap,
bs,
))
}
};
let repo = Repo {
id: id.clone(),
repo_def: prop(Self::DEFINITION, &props)?,
read_cap: prop(Self::READ_CAP, &props)?,
write_cap: prop(Self::WRITE_CAP_SECRET, &props).ok(),
//TODO: signer
signer: None,
//TODO: members
members: HashMap::new(),
branches,
opened_branches: HashMap::new(),
store,
};
Ok(repo)
}
/// True when the READ_CAP property is present for this repo id.
pub fn exists(&self) -> bool {
self.storage
.get(
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
&None,
)
.is_ok()
}
pub fn id(&self) -> &RepoId {
&self.id
}
/// Lists the ids of all branches belonging to repo `id`, scanning the
/// PREFIX_BRANCHES rows whose key starts with the serialized repo id.
pub fn get_all_branches(
id: &RepoId,
storage: &'a dyn KCVStorage,
) -> Result<Vec<BranchId>, StorageError> {
let size = to_vec(&BranchId::nil())?.len();
let key_prefix = to_vec(id).unwrap();
let mut res: Vec<BranchId> = vec![];
let total_size = key_prefix.len() + size;
for branch in storage.get_all_keys_and_values(
Self::PREFIX_BRANCHES,
total_size,
key_prefix,
None,
&None,
)? {
if branch.0.len() == total_size + 1 {
// NOTE(review): only the leading byte of the returned key is skipped
// here, not the repo-id prefix bytes, so from_slice would start
// reading at the repo id rather than the branch id. Compare with
// get_all_store_and_repo_ids below, which deserializes returned keys
// without skipping anything. Confirm the exact key layout returned by
// get_all_keys_and_values.
let branch_id: BranchId = from_slice(&branch.0[1..branch.0.len()])?;
res.push(branch_id);
}
}
Ok(res)
}
/// Builds a map of every store known in this storage to the repos it
/// contains (the store's own root repo is excluded from its member list).
pub fn get_all_store_and_repo_ids(
storage: &'a dyn KCVStorage,
) -> Result<HashMap<StoreRepo, Vec<RepoId>>, StorageError> {
let mut res = HashMap::new();
let size = to_vec(&RepoId::nil())?.len();
// pass 1: repos having a STORE_BRANCH property are store roots
let mut store_ids = HashSet::new();
for (store_id_ser, _) in storage.get_all_keys_and_values(
Self::PREFIX,
size,
vec![],
Some(Self::STORE_BRANCH),
&None,
)? {
let store_id: RepoId = from_slice(&store_id_ser)?;
store_ids.insert(store_id);
}
// pass 2: every repo's STORE_REPO property tells which store it belongs to
let mut repo_ids = HashMap::new();
for (repo_id_ser, store_repo_ser) in storage.get_all_keys_and_values(
Self::PREFIX,
size,
vec![],
Some(Self::STORE_REPO),
&None,
)? {
let repo_id: RepoId = from_slice(&repo_id_ser)?;
let store_repo: StoreRepo = from_slice(&store_repo_ser)?;
repo_ids.insert(repo_id, store_repo);
}
// seed the result with one empty entry per store
for store in store_ids.iter() {
let store_repo = repo_ids.get(store).ok_or(StorageError::NotAStoreRepo)?;
res.insert(*store_repo, vec![]);
}
// attach each non-store repo to its store's member list
for (repo_id, store_repo) in repo_ids.iter() {
if store_ids.get(repo_id).is_none() {
let repos = res.get_mut(store_repo).ok_or(StorageError::NotFound)?;
repos.push(*repo_id);
}
}
Ok(res)
}
// pub fn get_type(&self) -> Result<u8, ProtocolError> {
//     let type_ser = self
//         .store
//         .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE), &None)?;
//     let t: (u8, u32, Option<String>) = from_slice(&type_ser)?;
//     // if t.1 < now_timestamp() {
//     //     return Err(ProtocolError::Expired);
//     // }
//     Ok(t.0)
// }
// pub fn is_expired(&self) -> Result<bool, StorageError> {
//     let expire_ser =
//         self.store
//             .get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::TYPE), &None)?;
//     let expire: (u8, u32, Option<String>) = from_slice(&expire_ser)?;
//     if expire.1 < now_timestamp() {
//         return Ok(true);
//     }
//     Ok(false)
// }
/// Deletes all repo properties and every stored branch id, in one transaction.
// NOTE(review): this removes the repo's branch-id index but not the branches'
// own property rows (BranchStorage::del) — confirm who is responsible for those.
pub fn del(&self) -> Result<(), StorageError> {
self.storage.write_transaction(&mut |tx| {
let key = &to_vec(&self.id)?;
tx.del_all(Self::PREFIX, key, &Self::ALL_PROPERTIES, &None)?;
let size = to_vec(&BranchId::nil())?.len();
tx.del_all_values(Self::PREFIX_BRANCHES, key, size, None, &None)?;
Ok(())
})
}
}
#[cfg(test)]
mod test {
    // The previous imports (StorageError, ng_repo::types, ng_repo::utils,
    // std::fs) were all unused; re-introduce them alongside real tests.
    #[test]
    pub fn test_repo() {
        // TODO: round-trip coverage for RepoStorage: create / open / load /
        // get_all_branches / get_all_store_and_repo_ids / del.
    }
}

@ -9,18 +9,33 @@
//! Storage of user application data (RDF, content of rich-text document, etc)
use ng_repo::{errors::StorageError, types::*};
use ng_repo::{
block_storage::BlockStorage, errors::StorageError, repo::Repo, store::Store, types::*,
};
use crate::types::*;
use std::{
cmp::{max, min},
collections::HashMap,
mem::size_of_val,
sync::{Arc, RwLock},
};
pub trait UserStorage: Send + Sync {
/// Gets the StoreOverlay for a given RepoId
fn repo_id_to_store_overlay(&self, id: &RepoId) -> Result<StoreOverlay, StorageError>;
/// Returns, for every store known to this user, the list of repo ids it contains.
fn get_all_store_and_repo_ids(&self) -> Result<HashMap<StoreRepo, Vec<RepoId>>, StorageError>;
/// Loads a store's root repo, building its `Store` on top of `block_storage`.
fn load_store(
&self,
repo_store: &StoreRepo,
block_storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
) -> Result<Repo, StorageError>;
/// Loads a single repo that belongs to the already-loaded `store`.
fn load_repo(&self, repo_id: &RepoId, store: Arc<Store>) -> Result<Repo, StorageError>;
/// Persists the repo (and its branches).
fn save_repo(&self, repo: &Repo) -> Result<(), StorageError>;
}
pub(crate) struct InMemoryUserStorage {
@ -43,4 +58,23 @@ impl UserStorage for InMemoryUserStorage {
.ok_or(StorageError::NotFound)?
.to_owned())
}
// The in-memory user storage does not persist anything across sessions, so
// the repo/store persistence entry points below are intentionally left
// unimplemented.
fn get_all_store_and_repo_ids(&self) -> Result<HashMap<StoreRepo, Vec<RepoId>>, StorageError> {
unimplemented!();
}
fn load_store(
&self,
repo_store: &StoreRepo,
block_storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
) -> Result<Repo, StorageError> {
unimplemented!();
}
fn load_repo(&self, repo_id: &RepoId, store: Arc<Store>) -> Result<Repo, StorageError> {
unimplemented!();
}
fn save_repo(&self, repo: &Repo) -> Result<(), StorageError> {
unimplemented!();
}
}

@ -10,6 +10,9 @@
//! Repo object (on heap) to handle a Repository
use crate::types::*;
use crate::user_storage::repo::RepoStorage;
use ng_net::actor::SoS;
use ng_net::broker::{Broker, BROKER};
use ng_repo::log::*;
use ng_repo::object::Object;
use ng_repo::{
@ -32,7 +35,7 @@ use core::fmt;
#[cfg(not(target_family = "wasm"))]
use crate::rocksdb_user_storage::RocksDbUserStorage;
use crate::user_storage::{InMemoryUserStorage, UserStorage};
use async_std::sync::Mutex;
use async_std::sync::{Mutex, RwLockWriteGuard};
use std::{collections::HashMap, path::PathBuf, sync::Arc};
use ng_net::{
@ -50,7 +53,7 @@ pub struct Verifier {
pub config: VerifierConfig,
pub connected_server_id: Option<PubKey>,
graph_dataset: Option<oxigraph::store::Store>,
user_storage: Option<Box<dyn UserStorage>>,
user_storage: Option<Arc<Box<dyn UserStorage>>>,
block_storage: Option<Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>>,
last_seq_num: u64,
peer_id: PubKey,
@ -58,8 +61,9 @@ pub struct Verifier {
last_reservation: SystemTime,
stores: HashMap<OverlayId, Arc<Store>>,
repos: HashMap<RepoId, Repo>,
topics: HashMap<(OverlayId, TopicId), (RepoId, BranchId)>,
/// only used for InMemory type, to store the outbox
in_memory_outbox: Vec<Event>,
in_memory_outbox: Vec<EventOutboxStorage>,
}
impl fmt::Debug for Verifier {
@ -69,6 +73,12 @@ impl fmt::Debug for Verifier {
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
struct EventOutboxStorage {
event: Event,
overlay: OverlayId,
}
impl Verifier {
#[allow(deprecated)]
#[cfg(any(test, feature = "testing"))]
@ -96,14 +106,44 @@ impl Verifier {
last_reservation: SystemTime::UNIX_EPOCH,
stores: HashMap::new(),
repos: HashMap::new(),
topics: HashMap::new(),
in_memory_outbox: vec![],
}
}
pub fn get_store_mut(&mut self, store_repo: &StoreRepo) -> Arc<Store> {
/// Loads every persisted store and repo from user storage into memory.
///
/// No-op unless the verifier is persistent AND both user storage and block
/// storage are available. For each store: the store's root repo is loaded
/// first (which builds its `Store`), then every member repo is loaded on top
/// of that shared `Store`.
pub fn load(&mut self) -> Result<(), NgError> {
if self.is_persistent() && self.user_storage.is_some() && self.block_storage.is_some() {
// clone the Arc so user_storage is not borrowed while &mut self is used below
let user_storage = Arc::clone(self.user_storage.as_ref().unwrap());
let stores = user_storage.get_all_store_and_repo_ids()?;
for (store, repos) in stores.iter() {
let repo = user_storage
.load_store(store, Arc::clone(self.block_storage.as_ref().unwrap()))?;
// index the freshly built Store by its storage overlay id
self.stores.insert(
store.overlay_id_for_storage_purpose(),
Arc::clone(&repo.store),
);
let store = Arc::clone(&repo.store);
self.add_repo_without_saving(repo);
for repo_id in repos {
let repo = user_storage.load_repo(repo_id, Arc::clone(&store))?;
self.add_repo_without_saving(repo);
}
}
}
Ok(())
}
fn is_persistent(&self) -> bool {
self.config.config_type.is_persistent()
}
pub fn get_store_or_load(&mut self, store_repo: &StoreRepo) -> Arc<Store> {
let overlay_id = store_repo.overlay_id_for_storage_purpose();
let store = self.stores.entry(overlay_id).or_insert_with(|| {
// FIXME: get store_readcap from user storage
// FIXME: get store_readcap and store_overlay_branch_readcap from user storage
let store_readcap = ReadCap::nil();
let store_overlay_branch_readcap = ReadCap::nil();
let store = Store::new(
@ -115,7 +155,7 @@ impl Verifier {
.block_storage
.as_ref()
.ok_or(core::fmt::Error)
.expect("get_store_mut cannot be called on Remote Verifier"),
.expect("get_store_or_load cannot be called on Remote Verifier"),
),
);
Arc::new(store)
@ -127,8 +167,6 @@ impl Verifier {
&mut self,
store_repo: &StoreRepo,
mut repo: Repo,
//read_cap: &ReadCap,
//overlay_read_cap: Option<&ReadCap>,
) -> Result<Repo, NgError> {
let read_cap = repo.read_cap.to_owned().unwrap();
let overlay_read_cap = repo.overlay_branch_read_cap().cloned();
@ -152,6 +190,7 @@ impl Verifier {
mut_store.set_read_caps(read_cap, overlay_read_cap);
let new_store = Arc::new(mut_store);
let _ = self.stores.insert(overlay_id, Arc::clone(&new_store));
// TODO: store in user_storage
repo.store = new_store;
//let _ = self.repos.insert(*store_repo.repo_id(), repo);
Ok(repo)
@ -197,17 +236,14 @@ impl Verifier {
if self.stores.contains_key(&overlay_id) {
return;
}
// TODO: store in user_storage
self.stores.insert(overlay_id, store);
}
pub(crate) fn new_event(
pub(crate) async fn new_event(
&mut self,
//publisher: &PrivKey,
//seq: &mut u64,
commit: &Commit,
additional_blocks: &Vec<BlockId>,
//topic_id: TopicId,
//topic_priv_key: &BranchWriteCapSecret,
repo_id: RepoId,
store_repo: &StoreRepo,
) -> Result<(), NgError> {
@ -215,53 +251,40 @@ impl Verifier {
self.reserve_more(1)?;
}
self.new_event_(commit, additional_blocks, repo_id, store_repo)
.await
}
fn new_event_(
async fn new_event_(
&mut self,
//publisher: &PrivKey,
//seq: &mut u64,
commit: &Commit,
additional_blocks: &Vec<BlockId>,
//topic_id: TopicId,
//topic_priv_key: &BranchWriteCapSecret,
// store: &Store, // store could be omitted and a store repo ID would be given instead.
repo_id: RepoId,
store_repo: &StoreRepo,
) -> Result<(), NgError> {
//let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
//let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId)
//self.get_store(store_repo)
let publisher = self.config.peer_priv_key.clone();
self.last_seq_num += 1;
let seq_num = self.last_seq_num;
let repo = self.get_repo(repo_id, store_repo)?;
let event = Event::new(&publisher, seq_num, commit, additional_blocks, repo)?;
self.send_or_save_event_to_outbox(event)?;
self.send_or_save_event_to_outbox(event, repo.store.overlay_id)
.await?;
Ok(())
}
fn new_event_with_repo_(
async fn new_event_with_repo_(
&mut self,
//publisher: &PrivKey,
//seq: &mut u64,
commit: &Commit,
additional_blocks: &Vec<BlockId>,
//topic_id: TopicId,
//topic_priv_key: &BranchWriteCapSecret,
// store: &Store, // store could be omitted and a store repo ID would be given instead.
repo: &Repo,
) -> Result<(), NgError> {
//let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
//let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId)
//self.get_store(store_repo)
let publisher = self.config.peer_priv_key.clone();
self.last_seq_num += 1;
let seq_num = self.last_seq_num;
let event = Event::new(&publisher, seq_num, commit, additional_blocks, repo)?;
self.send_or_save_event_to_outbox(event)?;
self.send_or_save_event_to_outbox(event, repo.store.overlay_id)
.await?;
Ok(())
}
@ -273,7 +296,7 @@ impl Verifier {
Ok(self.last_seq_num)
}
pub(crate) fn new_events_with_repo(
pub(crate) async fn new_events_with_repo(
&mut self,
events: Vec<(Commit, Vec<Digest>)>,
repo: &Repo,
@ -285,12 +308,12 @@ impl Verifier {
self.reserve_more(missing_count as u64 + 1)?;
}
for event in events {
self.new_event_with_repo_(&event.0, &event.1, repo)?;
self.new_event_with_repo_(&event.0, &event.1, repo).await?;
}
Ok(())
}
pub(crate) fn new_events(
pub(crate) async fn new_events(
&mut self,
events: Vec<(Commit, Vec<Digest>)>,
repo_id: RepoId,
@ -303,7 +326,8 @@ impl Verifier {
self.reserve_more(missing_count as u64 + 1)?;
}
for event in events {
self.new_event_(&event.0, &event.1, repo_id.clone(), store_repo)?;
self.new_event_(&event.0, &event.1, repo_id.clone(), store_repo)
.await?;
}
Ok(())
}
@ -325,23 +349,107 @@ impl Verifier {
self.take_some_peer_last_seq_numbers(max(at_least as u16, qty))
}
fn send_or_save_event_to_outbox<'a>(&'a mut self, event: Event) -> Result<(), NgError> {
/// Drains and returns all events queued in the outbox for the current peer.
///
/// The outbox holds events produced while no broker connection was available.
/// Draining is destructive: the backing file is deleted (RocksDb) or the
/// in-memory vector emptied (Memory).
///
/// On-disk format (RocksDb): a concatenation of records, each a little-endian
/// `usize` length followed by that many bytes of BARE-serialized
/// `EventOutboxStorage`.
///
/// # Errors
/// `NgError::SerializationError` on a truncated or corrupted outbox file
/// (previously this panicked via unchecked slice indexing).
fn take_events_from_outbox(&mut self) -> Result<Vec<EventOutboxStorage>, NgError> {
    match &self.config.config_type {
        VerifierConfigType::JsSaveSession(js) => {
            // the JS host returns one serialized entry per queued event
            let events_ser = (js.outbox_read_function)(self.peer_id)?;
            let mut res = Vec::with_capacity(events_ser.len());
            for event_ser in events_ser {
                let event = serde_bare::from_slice(&event_ser)?;
                res.push(event);
            }
            Ok(res)
        }
        VerifierConfigType::RocksDb(path) => {
            let mut path = path.clone();
            path.push(format!("outbox{}", self.peer_id.to_hash_string()));
            let file = read(path.clone());
            let mut res = vec![];
            match file {
                Ok(ser) => {
                    if ser.len() > 0 {
                        let mut pos: usize = 0;
                        let usize_size = usize::BITS as usize / 8;
                        loop {
                            // bounds-checked slicing: a truncated or corrupted
                            // file yields an error instead of a panic
                            let header_end = pos
                                .checked_add(usize_size)
                                .ok_or(NgError::SerializationError)?;
                            let size = usize::from_le_bytes(
                                ser.get(pos..header_end)
                                    .ok_or(NgError::SerializationError)?
                                    .try_into()
                                    .map_err(|_| NgError::SerializationError)?,
                            );
                            pos = header_end;
                            let end = pos
                                .checked_add(size)
                                .ok_or(NgError::SerializationError)?;
                            let event = serde_bare::from_slice(
                                ser.get(pos..end).ok_or(NgError::SerializationError)?,
                            )?;
                            res.push(event);
                            pos = end;
                            if pos >= ser.len() {
                                break;
                            }
                        }
                    }
                }
                // a missing outbox file simply means nothing is queued
                Err(_) => {}
            }
            let _ = std::fs::remove_file(path);
            Ok(res)
        }
        VerifierConfigType::Memory => {
            let res = self.in_memory_outbox.drain(..).collect();
            Ok(res)
        }
        _ => unimplemented!(),
    }
}
async fn send_or_save_event_to_outbox<'a>(
&'a mut self,
event: Event,
overlay: OverlayId,
) -> Result<(), NgError> {
log_debug!("========== EVENT {:03}: {}", event.seq_num(), event);
if self.connected_server_id.is_some() {
// send the events to the server already
// send the event to the server already
let broker = BROKER.write().await;
let user = self.config.user_priv_key.to_pub();
let remote = self.connected_server_id.as_ref().unwrap().to_owned();
self.send_event(event, &broker, &user, &remote, overlay)
.await?;
} else {
match &self.config.config_type {
VerifierConfigType::JsSaveSession(js) => {
let e = EventOutboxStorage { event, overlay };
(js.outbox_write_function)(
self.peer_id,
event.seq_num(),
serde_bare::to_vec(&event)?,
e.event.seq_num(),
serde_bare::to_vec(&e)?,
)?;
}
VerifierConfigType::RocksDb(path) => {}
VerifierConfigType::RocksDb(path) => {
let mut path = path.clone();
std::fs::create_dir_all(path.clone()).unwrap();
path.push(format!("outbox{}", self.peer_id.to_hash_string()));
let mut file = OpenOptions::new()
.append(true)
.create(true)
.open(path)
.map_err(|_| NgError::IoError)?;
let e = EventOutboxStorage { event, overlay };
let event_ser = serde_bare::to_vec(&e)?;
//log_info!("EVENT size={}", event_ser.len());
//log_info!("EVENT {:?}", event_ser);
let size_ser = event_ser.len().to_le_bytes().to_vec();
file.write_all(&size_ser).map_err(|_| NgError::IoError)?;
file.flush().map_err(|_| NgError::IoError)?;
file.write_all(&event_ser).map_err(|_| NgError::IoError)?;
file.flush().map_err(|_| NgError::IoError)?;
file.sync_data().map_err(|_| NgError::IoError)?;
}
VerifierConfigType::Memory => {
self.in_memory_outbox.push(event);
self.in_memory_outbox
.push(EventOutboxStorage { event, overlay });
}
_ => unimplemented!(),
}
@ -349,6 +457,107 @@ impl Verifier {
Ok(())
}
/// Ensures the broker at `remote` is ready to accept this event's topic from
/// us: the repo must be pinned on the broker and the branch's topic
/// subscribed with publisher rights; updates local `opened_branches` state
/// on success.
///
/// NOTE(review): the event itself does not appear to be transmitted in this
/// function — presumably publication happens in a follow-up message once the
/// pin/subscription handshake succeeds; confirm.
async fn send_event<'a>(
&mut self,
event: Event,
broker: &RwLockWriteGuard<'a, Broker<'a>>,
user: &UserId,
remote: &DirectPeerId,
overlay: OverlayId,
) -> Result<(), NgError> {
// resolve the event's topic to the local (repo, branch) pair
let (repo_id, branch_id) = self
.topics
.get(&(overlay, *event.topic_id()))
.ok_or(NgError::TopicNotFound)?
.to_owned();
// scoped borrow so `self` is free again for the mutations below
let opened_as_publisher;
{
let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
opened_as_publisher = repo.branch_is_opened_as_publisher(&branch_id);
}
if !opened_as_publisher {
// ask the broker whether it already pins this repo and which topics it serves
let msg = RepoPinStatusReq::V0(RepoPinStatusReqV0 {
hash: repo_id.into(),
overlay: Some(overlay),
});
match broker
.request::<RepoPinStatusReq, RepoPinStatus>(user, remote, msg)
.await
{
Err(ProtocolError::False) | Err(ProtocolError::RepoAlreadyOpened) => {
// pinning the repo on the server broker
let pin_req;
{
let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
pin_req = PinRepo::from_repo(repo, remote);
}
if let Ok(SoS::Single(opened)) = broker
.request::<PinRepo, RepoOpened>(user, remote, pin_req)
.await
{
self.repo_was_opened(&repo_id, &opened)?;
//TODO: check that in the returned opened_repo, the branch we are interested in has effectively been subscribed as publisher by the broker.
} else {
return Err(NgError::ProtocolError);
}
}
// any other broker-side failure is surfaced as a protocol error
Err(_) => return Err(NgError::ProtocolError),
Ok(SoS::Single(pin_status)) => {
// checking that the branch is subscribed as publisher
if !pin_status.is_topic_subscribed_as_publisher(event.topic_id()) {
// we need to subscribe as publisher
let topic_sub;
{
let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
let branch_info = repo.branch(&branch_id)?;
// publishing requires the branch write capability (topic private key)
if branch_info.topic_priv_key.is_none() {
return Err(NgError::PermissionDenied);
}
topic_sub = TopicSub::new(repo, branch_info, Some(remote));
}
match broker
.request::<TopicSub, TopicSubRes>(user, remote, topic_sub)
.await
{
Ok(_) => {
// TODO, deal with heads
// update Repo locally
let repo =
self.repos.get_mut(&repo_id).ok_or(NgError::RepoNotFound)?;
repo.opened_branches.insert(*event.topic_id(), true);
}
Err(_) => {
return Err(NgError::BrokerError);
}
}
}
}
// a pin-status reply that is not SoS::Single is unexpected here
_ => return Err(NgError::ActorError),
}
// TODO: deal with received known_heads.
// DO a TopicSync
}
Ok(())
}
/// Sends every event still queued in the outbox to the connected broker.
///
/// Typically called right after a broker connection is (re)established.
///
/// Fix: the connection is now checked BEFORE the outbox is drained.
/// `take_events_from_outbox` is destructive (it deletes the outbox file /
/// clears the in-memory queue), so draining first and then failing with
/// `NotConnected` — as the previous order did — would silently lose all
/// queued events.
pub async fn send_outbox(&mut self) -> Result<(), NgError> {
    let remote = self
        .connected_server_id
        .as_ref()
        .ok_or(NgError::NotConnected)?
        .clone();
    let user = self.config.user_priv_key.to_pub();
    let events = self.take_events_from_outbox()?;
    let broker = BROKER.write().await;
    for e in events {
        self.send_event(e.event, &broker, &user, &remote, e.overlay)
            .await?;
    }
    Ok(())
}
fn take_some_peer_last_seq_numbers(&mut self, qty: u16) -> Result<(), NgError> {
match &self.config.config_type {
VerifierConfigType::JsSaveSession(js) => {
@ -358,7 +567,7 @@ impl Verifier {
VerifierConfigType::RocksDb(path) => {
let mut path = path.clone();
std::fs::create_dir_all(path.clone()).unwrap();
path.push(format!("lastseq{}", self.peer_id.to_string()));
path.push(format!("lastseq{}", self.peer_id.to_hash_string()));
log_debug!("last_seq path {}", path.display());
let file = read(path.clone());
@ -376,26 +585,19 @@ impl Verifier {
OpenOptions::new()
.write(true)
.open(path)
.map_err(|_| NgError::SerializationError)?,
.map_err(|_| NgError::IoError)?,
old_val,
)
}
Err(_) => (
File::create(path).map_err(|_| NgError::SerializationError)?,
0,
),
Err(_) => (File::create(path).map_err(|_| NgError::IoError)?, 0),
};
if qty > 0 {
let new_val = val + qty as u64;
let spls = SessionPeerLastSeq::V0(new_val);
let ser = spls.ser()?;
file_save
.write_all(&ser)
.map_err(|_| NgError::SerializationError)?;
file_save.write_all(&ser).map_err(|_| NgError::IoError)?;
file_save
.sync_data()
.map_err(|_| NgError::SerializationError)?;
file_save.sync_data().map_err(|_| NgError::IoError)?;
}
self.max_reserved_seq_num = val + qty as u64;
}
@ -448,12 +650,11 @@ impl Verifier {
_ => unimplemented!(), // can be WebRocksDb or RocksDb on wasm platforms
};
let peer_id = config.peer_priv_key.to_pub();
let should_load_last_seq_num = config.config_type.should_load_last_seq_num();
let mut verif = Verifier {
config,
connected_server_id: None,
graph_dataset: graph,
user_storage: user,
user_storage: user.map(|u| Arc::new(u)),
block_storage: block,
peer_id,
last_reservation: SystemTime::UNIX_EPOCH, // this is to avoid reserving 100 seq_nums at every start of a new session
@ -461,10 +662,11 @@ impl Verifier {
last_seq_num: 0,
stores: HashMap::new(),
repos: HashMap::new(),
topics: HashMap::new(),
in_memory_outbox: vec![],
};
// this is important as it will load the last seq from storage
if should_load_last_seq_num {
if verif.config.config_type.should_load_last_seq_num() {
verif.take_some_peer_last_seq_numbers(0)?;
verif.last_seq_num = verif.max_reserved_seq_num;
}
@ -487,7 +689,54 @@ impl Verifier {
unimplemented!();
}
pub fn new_store_default<'a>(
/// Registers a repo in memory only, without persisting it to user storage.
fn add_repo_without_saving(&mut self, repo: Repo) {
    // Discard the returned reference; callers only need the side effect.
    let _ = self.add_repo_(repo);
}
fn add_repo_and_save(&mut self, repo: Repo) -> &Repo {
let user_storage = self
.user_storage
.as_ref()
.map(|us| Arc::clone(us))
.and_then(|u| if self.is_persistent() { Some(u) } else { None });
let repo_ref = self.add_repo_(repo);
// save in user_storage
if user_storage.is_some() {
let _ = user_storage.unwrap().save_repo(repo_ref);
}
repo_ref
}
/// Inserts the repo into the in-memory map and indexes every branch topic
/// so incoming events on `(overlay, topic)` can be routed back to the
/// owning `(repo, branch)` pair.
fn add_repo_(&mut self, repo: Repo) -> &Repo {
    for (branch, info) in repo.branches.iter() {
        let key = (repo.store.overlay_id.clone(), info.topic.clone());
        let value = (repo.id.clone(), branch.clone());
        // Invariant: a topic is registered exactly once; a duplicate
        // registration is a programming error.
        assert_eq!(self.topics.insert(key, value), None);
    }
    self.repos.entry(repo.id).or_insert(repo)
}
/// Records, on the local `Repo`, which branch topics the broker reported as
/// opened and whether each one is subscribed as publisher.
///
/// # Errors
/// Returns `NgError::RepoNotFound` when `repo_id` is not a known repo.
fn repo_was_opened(
    &mut self,
    repo_id: &RepoId,
    opened_repo: &RepoOpened,
) -> Result<(), NgError> {
    let Some(repo) = self.repos.get_mut(repo_id) else {
        return Err(NgError::RepoNotFound);
    };
    for sub in opened_repo.iter() {
        repo.opened_branches
            .insert(*sub.topic_id(), sub.is_publisher());
    }
    Ok(())
}
pub async fn new_store_default<'a>(
&'a mut self,
creator: &UserId,
creator_priv_key: &PrivKey,
@ -513,7 +762,7 @@ impl Verifier {
.block_storage
.as_ref()
.ok_or(core::fmt::Error)
.expect("get_store_mut cannot be called on Remote Verifier"),
.expect("get_store_or_load cannot be called on Remote Verifier"),
),
);
Arc::new(store)
@ -523,29 +772,25 @@ impl Verifier {
creator_priv_key,
repo_write_cap_secret,
)?;
self.new_events_with_repo(proto_events, &repo)?;
self.new_events_with_repo(proto_events, &repo).await?;
let repo = self.complete_site_store(store_repo, repo)?;
let repo_ref = self.repos.entry(repo.id).or_insert(repo);
let repo_ref = self.add_repo_and_save(repo);
Ok(repo_ref)
}
/// returns the Repo and the last seq_num of the peer
pub fn new_repo_default<'a>(
pub async fn new_repo_default<'a>(
&'a mut self,
creator: &UserId,
creator_priv_key: &PrivKey,
store_repo: &StoreRepo,
) -> Result<&'a Repo, NgError> {
let store = self.get_store_mut(store_repo);
let store = self.get_store_or_load(store_repo);
let repo_write_cap_secret = SymKey::random();
let (repo, proto_events) =
store.create_repo_default(creator, creator_priv_key, repo_write_cap_secret)?;
self.new_events_with_repo(proto_events, &repo)?;
// let mut events = vec![];
// for event in proto_events {
// events.push(self.new_event(&event.0, &event.1, &repo.store)?);
// }
let repo_ref = self.repos.entry(repo.id).or_insert(repo);
self.new_events_with_repo(proto_events, &repo).await?;
let repo_ref = self.add_repo_and_save(repo);
Ok(repo_ref)
}
}
@ -557,8 +802,8 @@ mod test {
use ng_repo::log::*;
use ng_repo::store::Store;
#[test]
pub fn test_new_repo_default() {
#[async_std::test]
pub async fn test_new_repo_default() {
let (creator_priv_key, creator_pub_key) = generate_keypair();
let (publisher_privkey, publisher_pubkey) = generate_keypair();
@ -571,6 +816,7 @@ mod test {
let repo = verifier
.new_repo_default(&creator_pub_key, &creator_priv_key, &store_repo)
.await
.expect("new_default");
log_debug!("REPO OBJECT {}", repo);

@ -320,6 +320,8 @@ pub fn open_wallet_with_pazzle(
return Err(NgWalletError::InvalidPin);
}
log_info!("pazzle={:?}", pazzle);
let opening_pazzle = Instant::now();
verify(&wallet.content_as_bytes(), wallet.sig(), wallet.id())
@ -568,7 +570,7 @@ pub fn create_wallet_first_step_v0(
Ok(intermediary)
}
pub fn create_wallet_second_step_v0(
pub async fn create_wallet_second_step_v0(
mut params: CreateWalletIntermediaryV0,
verifier: &mut Verifier,
) -> Result<
@ -581,10 +583,12 @@ pub fn create_wallet_second_step_v0(
> {
let creating_pazzle = Instant::now();
let mut site = SiteV0::create_personal(params.user_privkey.clone(), verifier).map_err(|e| {
log_err!("{e}");
NgWalletError::InternalError
})?;
let mut site = SiteV0::create_personal(params.user_privkey.clone(), verifier)
.await
.map_err(|e| {
log_err!("{e}");
NgWalletError::InternalError
})?;
let user = params.user_privkey.to_pub();
@ -847,8 +851,9 @@ mod test {
.expect("create_wallet_first_step_v0");
let mut verifier = Verifier::new_dummy();
let (res, _, _) =
create_wallet_second_step_v0(res, &mut verifier).expect("create_wallet_second_step_v0");
let (res, _, _) = create_wallet_second_step_v0(res, &mut verifier)
.await
.expect("create_wallet_second_step_v0");
log_debug!(
"creation of wallet took: {} ms",

@ -528,7 +528,7 @@ async fn test_local_connection() {
let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
log_debug!("{}", root.path().to_str().unwrap());
let store = LmdbKCVStore::open(root.path(), master_key);
let store = LmdbKCVStorage::open(root.path(), master_key);
//let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker");

@ -28,7 +28,7 @@ use ng_net::types::{APP_NG_ONE_URL, NG_ONE_URL};
use ng_repo::log::*;
use ng_repo::types::*;
use ng_repo::utils::{generate_keypair, sign, verify};
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStore;
use ng_storage_rocksdb::kcv_storage::RocksdbKCVStorage;
use ng_wallet::types::*;
#[derive(RustEmbed)]
@ -36,7 +36,7 @@ use ng_wallet::types::*;
struct Static;
struct Server {
store: RocksdbKCVStore,
store: RocksdbKCVStorage,
}
impl Server {
@ -158,7 +158,7 @@ async fn main() {
let key: [u8; 32] = [0; 32];
log_debug!("data directory: {}", dir.to_str().unwrap());
fs::create_dir_all(dir.clone()).unwrap();
let store = RocksdbKCVStore::open(&dir, key);
let store = RocksdbKCVStorage::open(&dir, key);
if store.is_err() {
return;
}

@ -11,7 +11,7 @@
use ng_net::types::*;
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::types::PubKey;
use serde::{Deserialize, Serialize};
@ -20,7 +20,7 @@ use serde_bare::{from_slice, to_vec};
pub struct DynPeer<'a> {
/// peer ID
id: PubKey,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> DynPeer<'a> {
@ -33,7 +33,7 @@ impl<'a> DynPeer<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::ADDRS;
pub fn open(id: &PubKey, store: &'a dyn KCVStore) -> Result<DynPeer<'a>, StorageError> {
pub fn open(id: &PubKey, store: &'a dyn KCVStorage) -> Result<DynPeer<'a>, StorageError> {
let opening = DynPeer {
id: id.clone(),
store,
@ -46,7 +46,7 @@ impl<'a> DynPeer<'a> {
pub fn create(
id: &PubKey,
addrs: &Vec<NetAddr>,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<DynPeer<'a>, StorageError> {
let acc = DynPeer {
id: id.clone(),

@ -10,7 +10,7 @@
//! ng-wallet
use ng_repo::errors::StorageError;
use ng_repo::kcv_storage::KCVStore;
use ng_repo::kcv_storage::KCVStorage;
use ng_repo::types::*;
use ng_wallet::types::*;
use serde::{Deserialize, Serialize};
@ -19,7 +19,7 @@ use serde_bare::{from_slice, to_vec};
pub struct WalletRecord<'a> {
/// Wallet ID
id: WalletId,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
}
impl<'a> WalletRecord<'a> {
@ -33,7 +33,10 @@ impl<'a> WalletRecord<'a> {
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::BOOTSTRAP;
pub fn open(id: &WalletId, store: &'a dyn KCVStore) -> Result<WalletRecord<'a>, StorageError> {
pub fn open(
id: &WalletId,
store: &'a dyn KCVStorage,
) -> Result<WalletRecord<'a>, StorageError> {
let opening = WalletRecord {
id: id.clone(),
store,
@ -46,7 +49,7 @@ impl<'a> WalletRecord<'a> {
pub fn create(
id: &WalletId,
bootstrap: &Bootstrap,
store: &'a dyn KCVStore,
store: &'a dyn KCVStorage,
) -> Result<WalletRecord<'a>, StorageError> {
let wallet = WalletRecord {
id: id.clone(),

Loading…
Cancel
Save