refactor log macros, ngd master key taken from args or file

pull/19/head
Niko 2 years ago
parent fba7a91513
commit 684cd58962
  1. 1
      .gitignore
  2. 236
      Cargo.lock
  3. 1
      ng-app/src-tauri/Cargo.toml
  4. 20
      ng-app/src-tauri/src/lib.rs
  5. 1
      ng-sdk-js/Cargo.toml
  6. 19
      ng-sdk-js/src/lib.rs
  7. 1
      ng-wallet/Cargo.toml
  8. 66
      ng-wallet/src/lib.rs
  9. 24
      ng-wallet/src/types.rs
  10. 1
      ngcli/Cargo.toml
  11. 110
      ngcli/src/main.rs
  12. 11
      ngd/Cargo.toml
  13. 34
      ngd/src/cli.rs
  14. 256
      ngd/src/main.rs
  15. 27
      ngd/src/types.rs
  16. 3
      ngone/Cargo.toml
  17. 16
      ngone/src/main.rs
  18. 10
      p2p-broker/Cargo.toml
  19. 4
      p2p-broker/src/lib.rs
  20. 30
      p2p-broker/src/server.rs
  21. 26
      p2p-broker/src/server_ws.rs
  22. 92
      p2p-broker/src/types.rs
  23. 31
      p2p-broker/src/utils.rs
  24. 1
      p2p-client-ws/Cargo.toml
  25. 6
      p2p-client-ws/src/lib.rs
  26. 37
      p2p-client-ws/src/remote_ws.rs
  27. 22
      p2p-client-ws/src/remote_ws_wasm.rs
  28. 4
      p2p-net/Cargo.toml
  29. 2
      p2p-net/src/actor.rs
  30. 26
      p2p-net/src/broker.rs
  31. 38
      p2p-net/src/broker_connection.rs
  32. 35
      p2p-net/src/connection.rs
  33. 50
      p2p-net/src/lib.rs
  34. 31
      p2p-net/src/types.rs
  35. 17
      p2p-net/src/utils.rs
  36. 13
      p2p-repo/Cargo.toml
  37. 92
      p2p-repo/src/branch.rs
  38. 20
      p2p-repo/src/commit.rs
  39. 159
      p2p-repo/src/lib.rs
  40. 125
      p2p-repo/src/object.rs
  41. 12
      p2p-repo/src/utils.rs
  42. 1
      stores-lmdb/Cargo.toml
  43. 4
      stores-lmdb/src/kcv_store.rs
  44. 144
      stores-lmdb/src/repo_store.rs

1
.gitignore vendored

@ -1,4 +1,5 @@
*~
.ng
.direnv
!.github
\#*

236
Cargo.lock generated

@ -92,12 +92,55 @@ dependencies = [
"libc",
]
[[package]]
name = "anstream"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is-terminal",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d"
[[package]]
name = "anstyle-parse"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "anstyle-wincon"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
dependencies = [
"anstyle",
"windows-sys 0.48.0",
]
[[package]]
name = "anyhow"
version = "1.0.71"
@ -676,6 +719,48 @@ dependencies = [
"zeroize",
]
[[package]]
name = "clap"
version = "4.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80672091db20273a15cf9fdd4e47ed43b5091ec9841bf4c6145c9dfbbcae09ed"
dependencies = [
"clap_builder",
"clap_derive",
"once_cell",
]
[[package]]
name = "clap_builder"
version = "4.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1458a1df40e1e2afebb7ab60ce55c1fa8f431146205aa5f4887e0b111c27636"
dependencies = [
"anstream",
"anstyle",
"bitflags",
"clap_lex",
"strsim",
]
[[package]]
name = "clap_derive"
version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.16",
]
[[package]]
name = "clap_lex"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]]
name = "cocoa"
version = "0.24.1"
@ -713,6 +798,12 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "combine"
version = "4.6.6"
@ -1010,6 +1101,23 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f215f9b7224f49fb73256115331f677d868b34d18b65dbe4db392e6021eea90"
[[package]]
name = "default-net"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b06254cc7a9b82a9e3e78849eab1af3dbef006bf629b2928b882534d84ded6b9"
dependencies = [
"dlopen2",
"libc",
"memalloc",
"netlink-packet-core",
"netlink-packet-route",
"netlink-sys",
"once_cell",
"system-configuration",
"windows 0.48.0",
]
[[package]]
name = "derive_more"
version = "0.99.17"
@ -1076,6 +1184,29 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
[[package]]
name = "dlopen2"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b121caccfc363e4d9a4589528f3bef7c71b83c6ed01c8dc68cbeeb7fd29ec698"
dependencies = [
"dlopen2_derive",
"libc",
"once_cell",
"winapi",
]
[[package]]
name = "dlopen2_derive"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a09ac8bb8c16a282264c379dffba707b9c998afc7506009137f3c6136888078"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "doc-comment"
version = "0.3.3"
@ -2359,6 +2490,12 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
[[package]]
name = "memalloc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df39d232f5c40b0891c10216992c2f250c054105cb1e56f0fc9032db6203ecc1"
[[package]]
name = "memchr"
version = "2.5.0"
@ -2476,6 +2613,55 @@ dependencies = [
"jni-sys",
]
[[package]]
name = "netlink-packet-core"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e5cf0b54effda4b91615c40ff0fd12d0d4c9a6e0f5116874f03941792ff535a"
dependencies = [
"anyhow",
"byteorder",
"libc",
"netlink-packet-utils",
]
[[package]]
name = "netlink-packet-route"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea993e32c77d87f01236c38f572ecb6c311d592e56a06262a007fd2a6e31253c"
dependencies = [
"anyhow",
"bitflags",
"byteorder",
"libc",
"netlink-packet-core",
"netlink-packet-utils",
]
[[package]]
name = "netlink-packet-utils"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34"
dependencies = [
"anyhow",
"byteorder",
"paste",
"thiserror",
]
[[package]]
name = "netlink-sys"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411"
dependencies = [
"bytes",
"libc",
"log",
]
[[package]]
name = "new_debug_unreachable"
version = "1.0.4"
@ -2487,7 +2673,6 @@ name = "ng-app"
version = "0.1.0"
dependencies = [
"async-std",
"debug_print",
"ng-wallet",
"p2p-net",
"p2p-repo",
@ -2502,7 +2687,6 @@ name = "ng-sdk-js"
version = "0.1.0"
dependencies = [
"async-std",
"debug_print",
"futures",
"getrandom 0.1.16",
"gloo-timers",
@ -2532,7 +2716,6 @@ dependencies = [
"async-std",
"base64-url",
"chacha20poly1305",
"debug_print",
"getrandom 0.1.16",
"image",
"lazy_static",
@ -2553,7 +2736,6 @@ version = "0.1.0"
dependencies = [
"assert_cmd",
"async-std",
"debug_print",
"ed25519-dalek",
"fastbloom-rs",
"futures",
@ -2571,9 +2753,18 @@ name = "ngd"
version = "0.1.0"
dependencies = [
"async-std",
"base64-url",
"clap",
"default-net",
"env_logger",
"log",
"p2p-broker",
"p2p-net",
"p2p-repo",
"serde",
"serde_bare",
"serde_bytes",
"slice_as_array",
]
[[package]]
@ -2582,7 +2773,6 @@ version = "0.1.0"
dependencies = [
"base64-url",
"bytes",
"debug_print",
"env_logger",
"log",
"ng-wallet",
@ -2765,8 +2955,8 @@ dependencies = [
"async-std",
"async-trait",
"async-tungstenite",
"blake3",
"chacha20",
"debug_print",
"futures",
"getrandom 0.2.9",
"hex",
@ -2791,7 +2981,6 @@ dependencies = [
"async-trait",
"async-tungstenite",
"chacha20",
"debug_print",
"futures",
"getrandom 0.2.9",
"p2p-net",
@ -2814,11 +3003,9 @@ dependencies = [
"async-std",
"async-trait",
"blake3",
"debug_print",
"ed25519-dalek",
"futures",
"getrandom 0.2.9",
"gloo-timers",
"noise-protocol",
"noise-rust-crypto",
"num_enum",
@ -2842,11 +3029,14 @@ dependencies = [
"ed25519-dalek",
"fastbloom-rs",
"futures",
"gloo-timers",
"hex",
"log",
"rand 0.7.3",
"serde",
"serde_bare",
"serde_bytes",
"wasm-bindgen",
"web-time",
]
@ -4021,7 +4211,6 @@ dependencies = [
name = "stores-lmdb"
version = "0.1.0"
dependencies = [
"debug_print",
"hex",
"p2p-repo",
"rkv",
@ -4101,6 +4290,27 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "system-configuration"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
dependencies = [
"bitflags",
"core-foundation",
"system-configuration-sys",
]
[[package]]
name = "system-configuration-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "system-deps"
version = "6.1.0"
@ -4797,6 +5007,12 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
[[package]]
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "uuid"
version = "0.8.2"

@ -17,7 +17,6 @@ crate-type = ["staticlib", "cdylib", "rlib"]
tauri-build = { version = "2.0.0-alpha.5", features = [] }
[dependencies]
debug_print = "1.0.0"
tauri = { version = "2.0.0-alpha.9", features = [] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

@ -10,8 +10,8 @@ use async_std::stream::StreamExt;
use ng_wallet::types::*;
use ng_wallet::*;
use p2p_net::broker::*;
use p2p_net::log;
use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend};
use p2p_repo::log::*;
use p2p_repo::types::*;
use tauri::{App, Manager};
@ -30,13 +30,13 @@ pub type SetupHook = Box<dyn FnOnce(&mut App) -> Result<(), Box<dyn std::error::
#[tauri::command(rename_all = "snake_case")]
async fn test() -> Result<(), ()> {
log!("test is {}", BROKER.read().await.test());
log_info!("test is {}", BROKER.read().await.test());
Ok(())
}
#[tauri::command(rename_all = "snake_case")]
async fn wallet_gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> Result<ShuffledPazzle, ()> {
log!(
log_info!(
"wallet_gen_shuffle_for_pazzle_opening from rust {}",
pazzle_length
);
@ -45,7 +45,7 @@ async fn wallet_gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> Result<Shuf
#[tauri::command(rename_all = "snake_case")]
async fn wallet_gen_shuffle_for_pin() -> Result<Vec<u8>, ()> {
log!("wallet_gen_shuffle_for_pin from rust");
log_info!("wallet_gen_shuffle_for_pin from rust");
Ok(gen_shuffle_for_pin())
}
@ -55,13 +55,13 @@ async fn wallet_open_wallet_with_pazzle(
pazzle: Vec<u8>,
pin: [u8; 4],
) -> Result<EncryptedWallet, String> {
log!("wallet_open_wallet_with_pazzle from rust {:?}", pazzle);
log_info!("wallet_open_wallet_with_pazzle from rust {:?}", pazzle);
open_wallet_with_pazzle(wallet, pazzle, pin).map_err(|e| e.to_string())
}
#[tauri::command(rename_all = "snake_case")]
async fn wallet_create_wallet(mut params: CreateWalletV0) -> Result<CreateWalletResultV0, String> {
//log!("wallet_create_wallet from rust {:?}", params);
//log_info!("wallet_create_wallet from rust {:?}", params);
params.result_with_wallet_file = false;
let local_save = params.local_save;
let res = create_wallet_v0(params).await.map_err(|e| e.to_string());
@ -77,7 +77,7 @@ async fn wallet_create_wallet(mut params: CreateWalletV0) -> Result<CreateWallet
#[tauri::command(rename_all = "snake_case")]
async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) -> Result<(), ()> {
log!("doc_sync_branch {} {}", nuri, stream_id);
log_info!("doc_sync_branch {} {}", nuri, stream_id);
let mut reader;
{
@ -99,7 +99,7 @@ async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) ->
BROKER.write().await.tauri_stream_cancel(stream_id);
log!("END OF LOOP");
log_info!("END OF LOOP");
Ok(())
}
@ -110,7 +110,7 @@ async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) ->
#[tauri::command(rename_all = "snake_case")]
async fn cancel_doc_sync_branch(stream_id: &str) -> Result<(), ()> {
log!("cancel stream {}", stream_id);
log_info!("cancel stream {}", stream_id);
BROKER
.write()
.await
@ -123,7 +123,7 @@ async fn doc_get_file_from_store_with_object_ref(
nuri: &str,
obj_ref: ObjectRef,
) -> Result<ObjectContent, String> {
log!(
log_info!(
"doc_get_file_from_store_with_object_ref {} {:?}",
nuri,
obj_ref

@ -23,7 +23,6 @@ ng-wallet = { path = "../ng-wallet" }
async-std = { version = "1.12.0", features = ["attributes","unstable"] }
futures = "0.3.24"
pharos = "0.5"
debug_print = "1.0.0"
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"

@ -23,7 +23,7 @@ use p2p_net::broker::*;
use p2p_net::connection::{ClientConfig, StartConfig};
use p2p_net::types::{DirectPeerId, IP};
use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend, Sender};
use p2p_net::{log, sleep};
use p2p_repo::log::*;
use p2p_repo::types::*;
use p2p_repo::utils::generate_keypair;
use serde_json::json;
@ -31,6 +31,7 @@ use std::net::IpAddr;
use std::str::FromStr;
use std::sync::Arc;
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_futures::{future_to_promise, JsFuture};
#[cfg(target_arch = "wasm32")]
@ -112,7 +113,7 @@ extern "C" {
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn test() {
log!("test is {}", BROKER.read().await.test());
log_info!("test is {}", BROKER.read().await.test());
}
#[cfg(target_arch = "wasm32")]
@ -123,7 +124,7 @@ pub async fn doc_get_file_from_store_with_object_ref(
) -> Result<JsValue, JsValue> {
let obj_ref = serde_wasm_bindgen::from_value::<ObjectRef>(obj_ref_js).unwrap();
log!(
log_info!(
"doc_get_file {} {:?} {}",
nuri,
obj_ref.id,
@ -181,14 +182,14 @@ pub async fn doc_sync_branch(anuri: String, callback: &js_sys::Function) -> JsVa
Err(_) => {}
}
}
log!("END OF LOOP");
log_info!("END OF LOOP");
Ok(())
}
spawn_and_log_error(inner_task(reader, anuri, callback.clone()));
let cb = Closure::once(move || {
log!("close channel");
log_info!("close channel");
sender.close_channel()
});
//Closure::wrap(Box::new(move |sender| sender.close_channel()) as Box<FnMut(Sender<Commit>)>);
@ -200,7 +201,7 @@ pub async fn doc_sync_branch(anuri: String, callback: &js_sys::Function) -> JsVa
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn start() {
log!("random {}", random(10));
log_info!("random {}", random(10));
// let mut random_buf = [0u8; 32];
// getrandom::getrandom(&mut random_buf).unwrap();
@ -217,7 +218,7 @@ pub async fn start() {
let (client_priv_key, client_pub_key) = generate_keypair();
let (user_priv_key, user_pub_key) = generate_keypair();
log!("start connecting");
log_info!("start connecting");
let res = BROKER
.write()
@ -236,7 +237,7 @@ pub async fn start() {
}),
)
.await;
log!("broker.connect : {:?}", res);
log_info!("broker.connect : {:?}", res);
if res.is_err() {
return Ok(());
//panic!("Cannot connect");
@ -248,7 +249,7 @@ pub async fn start() {
async fn timer_close(remote_peer_id: DirectPeerId) -> ResultSend<()> {
async move {
sleep!(std::time::Duration::from_secs(3));
log!("timeout");
log_info!("timeout");
BROKER
.write()
.await

@ -8,7 +8,6 @@ description = "keeps the secret keys of all identities of the user in a safe wal
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
serde = { version = "1.0.142", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"

@ -9,13 +9,10 @@
// #[macro_use]
// extern crate slice_as_array;
#[macro_use]
extern crate p2p_net;
#[macro_use]
extern crate lazy_static;
use p2p_net::log;
pub mod types;
pub mod bip39;
@ -36,6 +33,7 @@ use chacha20poly1305::XChaCha20Poly1305;
use image::{imageops::FilterType, io::Reader as ImageReader, ImageOutputFormat};
use safe_transmute::transmute_to_bytes;
use p2p_repo::log::*;
use p2p_repo::types::{PubKey, Site, SiteType, Timestamp};
use p2p_repo::utils::{generate_keypair, now_timestamp, sign, verify};
use rand::prelude::*;
@ -61,7 +59,7 @@ pub fn enc_master_key(
.map_err(|e| NgWalletError::EncryptionError)?;
// `buffer` now contains the encrypted master key
// println!("cipher {:?}", buffer);
// log_debug!("cipher {:?}", buffer);
Ok(buffer.into_array::<48>().unwrap())
}
@ -124,7 +122,7 @@ pub fn enc_encrypted_block(
.map_err(|e| NgWalletError::EncryptionError)?;
// `buffer` now contains the message ciphertext
// println!("encrypted_block ciphertext {:?}", buffer);
// log_debug!("encrypted_block ciphertext {:?}", buffer);
Ok(buffer)
}
@ -151,7 +149,7 @@ pub fn dec_encrypted_block(
.map_err(|e| NgWalletError::DecryptionError)?;
// `ciphertext` now contains the decrypted block
//println!("decrypted_block {:?}", ciphertext);
//log_debug!("decrypted_block {:?}", ciphertext);
let decrypted_block =
from_slice::<EncryptedWalletV0>(&ciphertext).map_err(|e| NgWalletError::DecryptionError)?;
@ -205,7 +203,7 @@ pub fn open_wallet_with_pazzle(
v0.id,
)?;
log!(
log_info!(
"opening of wallet with pazzle took: {} ms",
opening_pazzle.elapsed().as_millis()
);
@ -287,16 +285,16 @@ pub fn display_pazzle(pazzle: &Vec<u8>) -> Vec<String> {
pub fn gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> ShuffledPazzle {
let mut rng = rand::thread_rng();
let mut category_indices: Vec<u8> = (0..pazzle_length).collect();
//log!("{:?}", category_indices);
//log_info!("{:?}", category_indices);
category_indices.shuffle(&mut rng);
//log!("{:?}", category_indices);
//log_info!("{:?}", category_indices);
let mut emoji_indices: Vec<Vec<u8>> = Vec::with_capacity(pazzle_length.into());
for _ in 0..pazzle_length {
let mut idx: Vec<u8> = (0..15).collect();
//log!("{:?}", idx);
//log_info!("{:?}", idx);
idx.shuffle(&mut rng);
//log!("{:?}", idx);
//log_info!("{:?}", idx);
emoji_indices.push(idx)
}
ShuffledPazzle {
@ -308,9 +306,9 @@ pub fn gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> ShuffledPazzle {
pub fn gen_shuffle_for_pin() -> Vec<u8> {
let mut rng = rand::thread_rng();
let mut digits: Vec<u8> = (0..10).collect();
//log!("{:?}", digits);
//log_info!("{:?}", digits);
digits.shuffle(&mut rng);
//log!("{:?}", digits);
//log_info!("{:?}", digits);
digits
}
@ -324,7 +322,7 @@ pub fn gen_shuffle_for_pin() -> Vec<u8> {
// for i in &mut mnemonic {
// *i = choices.chars().nth(ran.gen_range(0, 72)).unwrap();
// }
// log!("{}", mnemonic.iter().collect::<String>());
// log_info!("{}", mnemonic.iter().collect::<String>());
// }
/// creates a Wallet from a pin, a security text and image (with option to send the bootstrap and wallet to nextgraph.one)
@ -438,14 +436,14 @@ pub async fn create_wallet_v0(
*i = ran.gen_range(0, 15) + (category_indices[ix] << 4);
}
//println!("pazzle {:?}", pazzle);
//log_debug!("pazzle {:?}", pazzle);
let mut mnemonic = [0u16; 12];
for i in &mut mnemonic {
*i = ran.gen_range(0, 2048);
}
//println!("mnemonic {:?}", display_mnemonic(&mnemonic));
//log_debug!("mnemonic {:?}", display_mnemonic(&mnemonic));
//slice_as_array!(&mnemonic, [String; 12])
//.ok_or(NgWalletError::InternalError)?
@ -478,8 +476,8 @@ pub async fn create_wallet_v0(
let mut salt_mnemonic = [0u8; 16];
getrandom::getrandom(&mut salt_mnemonic).map_err(|e| NgWalletError::InternalError)?;
//println!("salt_pazzle {:?}", salt_pazzle);
//println!("salt_mnemonic {:?}", salt_mnemonic);
//log_debug!("salt_pazzle {:?}", salt_pazzle);
//log_debug!("salt_mnemonic {:?}", salt_mnemonic);
let mnemonic_key = derive_key_from_pass(
[transmute_to_bytes(&mnemonic), &params.pin].concat(),
@ -541,7 +539,7 @@ pub async fn create_wallet_v0(
// TODO send bootstrap (if)
// TODO send wallet (if)
log!(
log_info!(
"creating of wallet took: {} ms",
creating_pazzle.elapsed().as_millis()
);
@ -577,11 +575,11 @@ mod tests {
#[test]
fn test_gen_shuffle() {
let shuffle = gen_shuffle_for_pazzle_opening(9);
log!("{:?}", shuffle);
log_info!("{:?}", shuffle);
let shuffle = gen_shuffle_for_pazzle_opening(12);
log!("{:?}", shuffle);
log_info!("{:?}", shuffle);
let shuffle = gen_shuffle_for_pazzle_opening(15);
log!("{:?}", shuffle);
log_info!("{:?}", shuffle);
let digits = gen_shuffle_for_pin();
let digits = gen_shuffle_for_pin();
}
@ -618,26 +616,26 @@ mod tests {
.await
.expect("create_wallet_v0");
log!(
log_info!(
"creation of wallet took: {} ms",
creation.elapsed().as_millis()
);
log!("-----------------------------");
log_info!("-----------------------------");
let mut file = File::create("tests/wallet.ngw").expect("open wallet write file");
let ser_wallet = to_vec(&NgFile::V0(NgFileV0::Wallet(res.wallet.clone()))).unwrap();
file.write_all(&ser_wallet);
log!(
log_info!(
"wallet id: {:?}",
base64_url::encode(&res.wallet.id().slice())
);
log!("pazzle {:?}", display_pazzle(&res.pazzle));
log!("mnemonic {:?}", display_mnemonic(&res.mnemonic));
log!("pin {:?}", pin);
log_info!("pazzle {:?}", display_pazzle(&res.pazzle));
log_info!("mnemonic {:?}", display_mnemonic(&res.mnemonic));
log_info!("pin {:?}", pin);
if let Wallet::V0(v0) = res.wallet {
log!("security text: {:?}", v0.content.security_txt);
log_info!("security text: {:?}", v0.content.security_txt);
let mut file =
File::create("tests/generated_security_image.jpg").expect("open write file");
@ -658,9 +656,9 @@ mod tests {
let w = open_wallet_with_mnemonic(Wallet::V0(v0.clone()), res.mnemonic, pin)
.expect("open with mnemonic");
//println!("encrypted part {:?}", w);
//log_debug!("encrypted part {:?}", w);
log!(
log_info!(
"opening of wallet with mnemonic took: {} ms",
opening_mnemonic.elapsed().as_millis()
);
@ -669,12 +667,12 @@ mod tests {
let opening_pazzle = Instant::now();
let w = open_wallet_with_pazzle(Wallet::V0(v0.clone()), res.pazzle, pin)
.expect("open with pazzle");
log!(
log_info!(
"opening of wallet with pazzle took: {} ms",
opening_pazzle.elapsed().as_millis()
);
}
//println!("encrypted part {:?}", w);
//log_debug!("encrypted part {:?}", w);
}
}
}

@ -12,7 +12,7 @@ use std::fmt;
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use p2p_net::types::NetAddr;
use p2p_net::types::{BrokerServerV0, NetAddr};
use p2p_repo::types::*;
/// WalletId is a PubKey
@ -21,31 +21,11 @@ pub type WalletId = PubKey;
/// BootstrapId is a WalletId
pub type BootstrapId = WalletId;
/// BootstrapServer type
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub enum BoostrapServerTypeV0 {
Localhost(u16), // optional port number
BoxPrivate(Vec<NetAddr>),
BoxPublic(Vec<NetAddr>),
BoxPublicDyn(Vec<NetAddr>), // can be empty
Domain(String), // accepts an option trailing ":port" number
}
/// BootstrapServer details Version 0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BootstrapServerV0 {
/// Network addresses
pub server_type: BoostrapServerTypeV0,
/// peerId of the server
pub peer_id: PubKey,
}
/// Bootstrap content Version 0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BootstrapContentV0 {
/// list of servers, in order of preference
pub servers: Vec<BootstrapServerV0>,
pub servers: Vec<BrokerServerV0>,
}
/// Bootstrap Version 0

@ -8,7 +8,6 @@ description = "CLI command-line interpreter of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" }
p2p-client-ws = { path = "../p2p-client-ws" }

@ -9,21 +9,21 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use debug_print::*;
use ed25519_dalek::*;
use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership};
use futures::{future, pin_mut, stream, SinkExt, StreamExt};
use p2p_broker::broker_store::config::ConfigMode;
use p2p_repo::object::Object;
use p2p_repo::store::{store_max_value_size, store_valid_value_size, HashMapRepoStore, RepoStore};
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use rand::rngs::OsRng;
use std::collections::HashMap;
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use p2p_net::errors::*;
use p2p_net::types::*;
use p2p_repo::log::*;
use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp};
@ -50,9 +50,9 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
repo_pubkey,
repo_secret,
);
//println!(">>> add_obj");
println!(" id: {}", obj.id());
//println!(" deps: {:?}", obj.deps());
//log_debug!(">>> add_obj");
log_debug!(" id: {}", obj.id());
//log_debug!(" deps: {:?}", obj.deps());
obj.save(store).unwrap();
obj.reference().unwrap()
}
@ -94,7 +94,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
expiry,
)
.unwrap();
//println!("commit: {}", commit.id().unwrap());
//log_debug!("commit: {}", commit.id().unwrap());
add_obj(
ObjectContent::Commit(commit),
obj_deps,
@ -114,7 +114,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
let deps = vec![];
let expiry = None;
let body = CommitBody::Branch(branch);
//println!("body: {:?}", body);
//log_debug!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
@ -134,7 +134,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
let expiry = None;
let content = [7u8; 777].to_vec();
let body = CommitBody::Transaction(Transaction::V0(content));
//println!("body: {:?}", body);
//log_debug!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
@ -153,7 +153,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
) -> ObjectRef {
let expiry = None;
let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body);
//log_debug!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
@ -170,12 +170,12 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// repo
let repo_keypair: Keypair = Keypair::generate(&mut rng);
// println!(
// log_debug!(
// "repo private key: ({}) {:?}",
// repo_keypair.secret.as_bytes().len(),
// repo_keypair.secret.as_bytes()
// );
// println!(
// log_debug!(
// "repo public key: ({}) {:?}",
// repo_keypair.public.as_bytes().len(),
// repo_keypair.public.as_bytes()
@ -193,11 +193,11 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// branch
let branch_keypair: Keypair = Keypair::generate(&mut rng);
//println!("branch public key: {:?}", branch_keypair.public.as_bytes());
//log_debug!("branch public key: {:?}", branch_keypair.public.as_bytes());
let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes());
let member_keypair: Keypair = Keypair::generate(&mut rng);
//println!("member public key: {:?}", member_keypair.public.as_bytes());
//log_debug!("member public key: {:?}", member_keypair.public.as_bytes());
let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes());
let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes());
@ -221,18 +221,18 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
tags,
metadata,
);
//println!("branch: {:?}", branch);
println!("branch deps/acks:");
println!("");
println!(" br");
println!(" / \\");
println!(" t1 t2");
println!(" / \\ / \\");
println!(" a3 t4<--t5-->(t1)");
println!(" / \\");
println!(" a6 a7");
println!("");
//log_debug!("branch: {:?}", branch);
log_debug!("branch deps/acks:");
log_debug!("");
log_debug!(" br");
log_debug!(" / \\");
log_debug!(" t1 t2");
log_debug!(" / \\ / \\");
log_debug!(" a3 t4<--t5-->(t1)");
log_debug!(" / \\");
log_debug!(" a6 a7");
log_debug!("");
// commit bodies
@ -247,7 +247,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// create & add commits to store
println!(">> br");
log_debug!(">> br");
let br = add_commit(
branch_body,
member_privkey,
@ -261,7 +261,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
println!(">> t1");
log_debug!(">> t1");
let t1 = add_commit(
branch_body,
member_privkey,
@ -275,7 +275,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
println!(">> t2");
log_debug!(">> t2");
let t2 = add_commit(
branch_body,
member_privkey,
@ -289,7 +289,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
println!(">> a3");
log_debug!(">> a3");
let a3 = add_commit(
branch_body,
member_privkey,
@ -303,7 +303,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
println!(">> t4");
log_debug!(">> t4");
let t4 = add_commit(
branch_body,
member_privkey,
@ -317,7 +317,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
println!(">> t5");
log_debug!(">> t5");
let t5 = add_commit(
branch_body,
member_privkey,
@ -331,7 +331,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
println!(">> a6");
log_debug!(">> a6");
let a6 = add_commit(
branch_body,
member_privkey,
@ -345,7 +345,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
println!(">> a7");
log_debug!(">> a7");
let a7 = add_commit(
branch_body,
member_privkey,
@ -366,7 +366,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// Sending everything to the broker
for (v) in store.get_all() {
//debug_println!("SENDING {}", k);
//log_debug!("SENDING {}", k);
let _ = public_overlay_cnx
.put_block(&v)
.await
@ -403,7 +403,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store,
);
debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len());
log_debug!("LOCAL STORE HAS {} BLOCKS", store.get_len());
// Let's pretend that we know that the head of the branch in the broker is at commits a6 and a7.
// normally it would be the pub/sub that notifies us of those heads.
@ -433,14 +433,14 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
let mut i = 0;
while let Some(b) = synced_blocks_stream.next().await {
debug_println!("GOT BLOCK {}", b.id());
log_debug!("GOT BLOCK {}", b.id());
store.put(&b);
i += 1;
}
debug_println!("SYNCED {} BLOCKS", i);
log_debug!("SYNCED {} BLOCKS", i);
debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len());
log_debug!("LOCAL STORE HAS {} BLOCKS", store.get_len());
// now the client can verify the DAG and each commit. Then update its list of heads.
}
@ -471,7 +471,7 @@ async fn test(
});
let mut public_overlay_cnx = cnx.overlay_connect(&repo, true).await?;
debug_println!("put_block");
log_debug!("put_block");
let my_block_id = public_overlay_cnx
.put_block(&Block::new(
@ -483,7 +483,7 @@ async fn test(
))
.await?;
debug_println!("added block_id to store {}", my_block_id);
log_debug!("added block_id to store {}", my_block_id);
let object_id = public_overlay_cnx
.put_object(
@ -500,7 +500,7 @@ async fn test(
)
.await?;
debug_println!("added object_id to store {}", object_id);
log_debug!("added object_id to store {}", object_id);
let mut my_block_stream = public_overlay_cnx
.get_block(my_block_id, true, None)
@ -508,27 +508,27 @@ async fn test(
//.expect("get_block failed");
while let Some(b) = my_block_stream.next().await {
debug_println!("GOT BLOCK {}", b.id());
log_debug!("GOT BLOCK {}", b.id());
}
let mut my_object_stream = public_overlay_cnx.get_block(object_id, true, None).await?;
//.expect("get_block for object failed");
while let Some(b) = my_object_stream.next().await {
debug_println!("GOT BLOCK {}", b.id());
log_debug!("GOT BLOCK {}", b.id());
}
let object = public_overlay_cnx.get_object(object_id, None).await?;
//.expect("get_object failed");
debug_println!("GOT OBJECT with ID {}", object.id());
log_debug!("GOT OBJECT with ID {}", object.id());
// let object_id = public_overlay_cnx
// .copy_object(object_id, Some(now_timestamp() + 60))
// .await
// .expect("copy_object failed");
// debug_println!("COPIED OBJECT to OBJECT ID {}", object_id);
// log_debug!("COPIED OBJECT to OBJECT ID {}", object_id);
public_overlay_cnx.delete_object(object_id).await?;
//.expect("delete_object failed");
@ -538,7 +538,7 @@ async fn test(
.await
.unwrap_err();
debug_println!("result from get object after delete: {}", res);
log_debug!("result from get object after delete: {}", res);
assert_eq!(res, ProtocolError::NotFound);
//TODO test pin/unpin
@ -551,12 +551,12 @@ async fn test(
}
async fn test_local_connection() {
debug_println!("===== TESTING LOCAL API =====");
log_debug!("===== TESTING LOCAL API =====");
let root = tempfile::Builder::new().prefix("ngcli").tempdir().unwrap();
let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let store = LmdbKCVStore::open(root.path(), master_key);
//let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker");
@ -569,7 +569,7 @@ async fn test_local_connection() {
}
async fn test_remote_connection(url: &str) {
debug_println!("===== TESTING REMOTE API =====");
log_debug!("===== TESTING REMOTE API =====");
let (priv_key, pub_key) = generate_keypair();
@ -580,7 +580,7 @@ async fn test_remote_connection(url: &str) {
#[async_std::main]
async fn main() -> std::io::Result<()> {
debug_println!("Starting nextgraph CLI...");
log_debug!("Starting nextgraph CLI...");
//test_local_connection().await;
@ -606,12 +606,12 @@ mod test {
#[async_std::test]
pub async fn test_remote_cnx() -> Result<(), Box<dyn std::error::Error>> {
let keys = gen_keys();
// println!("Public key of node: {:?}", keys.1);
// println!("Private key of node: {:?}", keys.0.as_slice());
// log_debug!("Public key of node: {:?}", keys.1);
// log_debug!("Private key of node: {:?}", keys.0.as_slice());
let pubkey = PubKey::Ed25519PubKey(keys.1);
println!("Public key of node: {:?}", pubkey);
println!("Private key of node: {:?}", keys.0.as_slice());
log_debug!("Public key of node: {:?}", pubkey);
log_debug!("Private key of node: {:?}", keys.0.as_slice());
let thr = task::spawn(run_server_accept_one(
"127.0.0.1",

@ -8,7 +8,16 @@ description = "Daemon of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"
p2p-broker = { path = "../p2p-broker" }
p2p-net = { path = "../p2p-net" }
p2p-repo = { path = "../p2p-repo" }
p2p-repo = { path = "../p2p-repo", features = ["server_log_output"] }
async-std = { version = "1.12.0", features = ["attributes"] }
default-net = "0.15"
log = "0.4"
env_logger = "0.10"
clap = { version = "4.3.4", features = ["derive","env"] }
base64-url = "2.0.0"
slice_as_array = "1.1.0"

@ -0,0 +1,34 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use clap::Parser;
// Command-line arguments for the ngd daemon, parsed with clap's derive API.
// NOTE(review): the `///` doc comments on the fields below double as the
// runtime `--help` text shown to users — they are behavior-visible strings
// and are deliberately left untouched here.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub(crate) struct Cli {
    /// List all network interfaces available on the host
    #[arg(short('i'), long)]
    pub list_interfaces: bool,
    /// Increase the logging output. once : info, twice : debug, 3 times : trace
    #[arg(short, long, action = clap::ArgAction::Count)]
    pub verbose: u8,
    /// Base path for server home folder containing all persistent files
    #[arg(short, long, default_value = ".ng")]
    pub base: String,
    /// Master key of the server. Should be a base64-url encoded serde serialization of a [u8; 32]. if not provided, a new key will be generated for you
    #[arg(short, long, env = "NG_SERVER_KEY")]
    pub key: Option<String>,
    /// Saves to disk the provided or automatically generated key. Only used if file storage is secure. Alternatives are passing the key at every start with --key or NG_SERVER_KEY env var.
    #[arg(long)]
    pub save_key: bool,
}

@ -6,18 +6,239 @@
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
#[macro_use]
extern crate slice_as_array;
pub mod types;
mod cli;
use crate::cli::*;
use clap::Parser;
use p2p_broker::server_ws::run_server;
use p2p_net::utils::{gen_keys, Dual25519Keys, Sensitive, U8Array};
use p2p_broker::utils::*;
use p2p_net::utils::{gen_keys, keys_from_bytes, Dual25519Keys, Sensitive, U8Array};
use p2p_net::WS_PORT;
use p2p_repo::log::*;
use p2p_repo::{
types::{PrivKey, PubKey},
utils::{generate_keypair, keypair_from_ed, sign, verify},
};
use std::fs::{read_to_string, write};
use std::io::Read;
use std::io::Write;
use std::io::{BufReader, ErrorKind};
use std::path::{Path, PathBuf};
/// Classification of a host network interface, derived from its first IPv4
/// address (see `get_interface`).
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum InterfaceType {
    /// Loopback address (e.g. 127.0.0.1)
    Loopback,
    /// Private (RFC 1918) or link-local address
    Private,
    /// Globally routable address
    Public,
    /// Unspecified / documentation / broadcast / multicast address — unusable, filtered out
    Invalid,
}
/// Render an IPv4 network as `address/prefix_len` (CIDR notation).
pub fn print_ipv4(ip: &default_net::ip::Ipv4Net) -> String {
    let mut out = ip.addr.to_string();
    out.push('/');
    out.push_str(&ip.prefix_len.to_string());
    out
}
/// Render an IPv6 network as `address/prefix_len` (CIDR notation).
pub fn print_ipv6(ip: &default_net::ip::Ipv6Net) -> String {
    let mut out = ip.addr.to_string();
    out.push('/');
    out.push_str(&ip.prefix_len.to_string());
    out
}
/// A host network interface as discovered via the `default_net` crate,
/// together with its [`InterfaceType`] classification.
pub struct Interface {
    /// Classification derived from the interface's first IPv4 address
    pub if_type: InterfaceType,
    /// OS-level name of the interface (e.g. "eth0")
    pub name: String,
    /// Hardware (MAC) address, when the OS reports one
    pub mac_addr: Option<default_net::interface::MacAddr>,
    /// List of Ipv4Net for the network interface
    pub ipv4: Vec<default_net::ip::Ipv4Net>,
    /// List of Ipv6Net for the network interface
    pub ipv6: Vec<default_net::ip::Ipv6Net>,
}
/// Enumerate the host's network interfaces and classify each one by the kind
/// of its first IPv4 address.
///
/// Interfaces with no IPv4 address, or whose first IPv4 address is
/// unspecified/documentation/broadcast/multicast (`InterfaceType::Invalid`),
/// are omitted from the result.
pub fn get_interface() -> Vec<Interface> {
    let mut found: Vec<Interface> = Vec::new();
    for iface in default_net::get_interfaces() {
        // Skip interfaces that carry no IPv4 address at all.
        if iface.ipv4.is_empty() {
            continue;
        }
        // Classification is based solely on the first IPv4 address.
        let addr = iface.ipv4[0].addr;
        let kind = if addr.is_loopback() {
            InterfaceType::Loopback
        } else if addr.is_private() || addr.is_link_local() {
            InterfaceType::Private
        } else if !addr.is_unspecified()
            && !addr.is_documentation()
            && !addr.is_broadcast()
            && !addr.is_multicast()
        {
            InterfaceType::Public
        } else {
            InterfaceType::Invalid
        };
        if kind == InterfaceType::Invalid {
            continue;
        }
        found.push(Interface {
            if_type: kind,
            name: iface.name,
            mac_addr: iface.mac_addr,
            ipv4: iface.ipv4,
            ipv6: iface.ipv6,
        });
    }
    found
}
pub fn print_interfaces() {
let interfaces = get_interface();
for interface in interfaces {
println!("{} \t{:?}", interface.name, interface.if_type);
println!(
"\tIPv4: {}",
interface
.ipv4
.iter()
.map(|ip| print_ipv4(ip))
.collect::<Vec<String>>()
.join(" ")
);
println!(
"\tIPv6: {}",
interface
.ipv6
.iter()
.map(|ip| print_ipv6(ip))
.collect::<Vec<String>>()
.join(" ")
);
if let Some(mac_addr) = interface.mac_addr {
println!("\tMAC: {}", mac_addr);
}
}
}
/// Decode a base64-url encoded master key into a raw `[u8; 32]`.
///
/// Logs an error and returns `Err(())` when the string is not valid
/// base64-url, or when it does not decode to exactly 32 bytes.
fn decode_key(key_string: String) -> Result<[u8; 32], ()> {
    let bytes = base64_url::decode(&key_string).map_err(|_| log_err!("key has invalid content"))?;
    match slice_as_array!(&bytes, [u8; 32]) {
        Some(arr) => Ok(*arr),
        None => {
            log_err!("key has invalid content array");
            Err(())
        }
    }
}
#[async_std::main]
async fn main() -> std::io::Result<()> {
println!("Starting NextGraph daemon...");
let args = Cli::parse();
if args.list_interfaces {
println!("list of network interfaces");
print_interfaces();
return Ok(());
}
if std::env::var("RUST_LOG").is_err() {
if args.verbose == 0 {
std::env::set_var("RUST_LOG", "warn");
} else if args.verbose == 1 {
std::env::set_var("RUST_LOG", "info");
} else if args.verbose == 2 {
std::env::set_var("RUST_LOG", "debug");
} else if args.verbose >= 3 {
std::env::set_var("RUST_LOG", "trace");
}
}
env_logger::init();
log_info!("Starting NextGraph daemon (ngd)");
log_debug!("base {:?}", args.base);
let mut path = PathBuf::from(&args.base);
path.push("server");
if !path.is_absolute() {
path = std::env::current_dir().unwrap().join(path);
}
log_debug!("cur {}", std::env::current_dir().unwrap().display());
log_debug!("home {}", path.to_str().unwrap());
std::fs::create_dir_all(path.clone()).unwrap();
// reading key from file, if any
let mut key_path = path.clone();
key_path.push("key");
let key_from_file: Option<[u8; 32]>;
let res = |key_path| -> Result<[u8; 32], &str> {
let file = read_to_string(key_path).map_err(|_| "")?;
decode_key(
file.lines()
.nth(0)
.ok_or("empty file")?
.to_string()
.trim()
.to_string(),
)
.map_err(|_| "invalid file")
}(&key_path);
if res.is_err() && res.unwrap_err().len() > 0 {
log_err!(
"provided key file is incorrect. {}. aborting start",
res.unwrap_err()
);
return Err(ErrorKind::InvalidInput.into());
}
key_from_file = match res {
Err(_) => None,
Ok(k) => Some(k),
};
let keys: [[u8; 32]; 4] = match args.key {
Some(key_string) => {
if key_from_file.is_some() {
log_err!("provided key option will not be used as a key file is already present");
gen_broker_keys(Some(key_from_file.unwrap()))
} else {
let res = decode_key(key_string);
if res.is_err() {
log_err!("provided key is invalid. cannot start");
return Err(ErrorKind::InvalidInput.into());
}
if args.save_key {
let master_key = base64_url::encode(&res.unwrap());
if let Err(e) = write(key_path.clone(), master_key) {
log_err!("cannot save key to file. aborting start");
return Err(e);
}
log_info!("The key has been saved to {}", key_path.to_str().unwrap());
}
gen_broker_keys(Some(res.unwrap()))
}
}
None => {
if key_from_file.is_some() {
gen_broker_keys(Some(key_from_file.unwrap()))
} else {
let res = gen_broker_keys(None);
let master_key = base64_url::encode(&res[0]);
if args.save_key {
if let Err(e) = write(key_path.clone(), master_key) {
log_err!("cannot save key to file. aborting start");
return Err(e);
}
log_info!("The key has been saved to {}", key_path.to_str().unwrap());
} else {
// on purpose we don't log the key, just print it out stdout, as it should be saved in logger's files
println!("YOUR GENERATED KEY IS: {}", master_key);
log_err!("At your request, the key wasn't saved.");
log_err!("provide it again to the next start of ngd with --key option or NG_SERVER_KEY env variable");
}
res
}
}
};
println!("{:?}", keys);
// let keys = gen_keys();
// let pub_key = PubKey::Ed25519PubKey(keys.1);
@ -32,20 +253,23 @@ async fn main() -> std::io::Result<()> {
// let privkey = duals.x25519_priv;
// let pubkey = PubKey::Ed25519PubKey(duals.x25519_public);
// println!("Public key of node: {:?}", keys.1);
// println!("Private key of node: {:?}", keys.0.as_slice());
let pubkey = PubKey::Ed25519PubKey([
95, 155, 249, 202, 41, 105, 71, 51, 206, 126, 9, 84, 132, 92, 60, 7, 74, 179, 46, 21, 21,
242, 171, 27, 249, 79, 76, 176, 168, 43, 83, 2,
]);
let privkey = Sensitive::<[u8; 32]>::from_slice(&[
56, 86, 36, 0, 109, 59, 152, 66, 166, 71, 201, 20, 119, 64, 173, 99, 215, 52, 40, 189, 96,
142, 3, 134, 167, 187, 235, 4, 39, 26, 31, 119,
]);
println!("Public key of node: {:?}", pubkey);
println!("Private key of node: {:?}", privkey.as_slice());
run_server("127.0.0.1", WS_PORT, privkey, pubkey).await?;
// log_debug!("Public key of node: {:?}", keys.1);
// log_debug!("Private key of node: {:?}", keys.0.as_slice());
let (privkey, pubkey) = keys_from_bytes(keys[1]);
// let pubkey = PubKey::Ed25519PubKey([
// 95, 155, 249, 202, 41, 105, 71, 51, 206, 126, 9, 84, 132, 92, 60, 7, 74, 179, 46, 21, 21,
// 242, 171, 27, 249, 79, 76, 176, 168, 43, 83, 2,
// ]);
// let privkey = Sensitive::<[u8; 32]>::from_slice(&[
// 56, 86, 36, 0, 109, 59, 152, 66, 166, 71, 201, 20, 119, 64, 173, 99, 215, 52, 40, 189, 96,
// 142, 3, 134, 167, 187, 235, 4, 39, 26, 31, 119,
// ]);
log_debug!("Public key of node: {:?}", pubkey);
log_debug!("Private key of node: {:?}", privkey.as_slice());
run_server("127.0.0.1", WS_PORT, privkey, pubkey, path).await?;
Ok(())
}

@ -0,0 +1,27 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_broker::types::BrokerOverlayConfig;
use p2p_broker::types::ListenerV0;
use p2p_repo::types::PrivKey;
use serde::{Deserialize, Serialize};
/// DaemonConfig Version 0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DaemonConfigV0 {
    /// List of listeners for TCP (HTTP) incoming connections
    pub listeners: Vec<ListenerV0>,
    /// Overlay permissions and forwarding configuration applied by this broker
    pub overlays_config: BrokerOverlayConfig,
}
/// Daemon config
///
/// Versioned wrapper so the persisted configuration format can evolve
/// without breaking older files.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum DaemonConfig {
    /// Version 0 of the config format
    V0(DaemonConfigV0),
}

@ -15,7 +15,7 @@ rust-embed = "6"
log = "0.4"
env_logger = "0.10"
stores-lmdb = { path = "../stores-lmdb" }
p2p-repo = { path = "../p2p-repo" }
p2p-repo = { path = "../p2p-repo", features = ["server_log_output"] }
p2p-net = { path = "../p2p-net" }
ng-wallet = { path = "../ng-wallet" }
serde = { version = "1.0.142", features = ["derive"] }
@ -25,5 +25,4 @@ serde-big-array = "0.5.1"
base64-url = "2.0.0"
slice_as_array = "1.1.0"
serde_json = "1.0.96"
debug_print = "1.0.0"
bytes = "1.0"

@ -16,7 +16,6 @@ use p2p_repo::store::StorageError;
use warp::reply::Response;
use warp::{Filter, Reply};
use debug_print::debug_println;
use rust_embed::RustEmbed;
use serde_bare::{from_slice, to_vec};
use serde_json::json;
@ -26,6 +25,7 @@ use std::{env, fs};
use crate::store::wallet_record::*;
use crate::types::*;
use ng_wallet::types::*;
use p2p_repo::log::*;
use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, sign, verify};
use stores_lmdb::kcv_store::LmdbKCVStore;
@ -44,7 +44,7 @@ impl Server {
let bootstrap = add_wallet.bootstrap();
debug_println!("ADDING wallet {}", bootstrap.id());
log_debug!("ADDING wallet {}", bootstrap.id());
verify(
&bootstrap.content_as_bytes(),
@ -85,7 +85,7 @@ impl Server {
}
fn get_wallet(&self, encoded_id: String) -> Result<Response, NgHttpError> {
debug_println!("DOWNLOAD wallet {}", encoded_id);
log_debug!("DOWNLOAD wallet {}", encoded_id);
let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?;
let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?;
let wallet_id = PubKey::Ed25519PubKey(*array);
@ -104,7 +104,7 @@ impl Server {
}
fn get_bootstrap(&self, encoded_id: String) -> Result<Response, NgHttpError> {
debug_println!("DOWNLOAD bootstrap {}", encoded_id);
log_debug!("DOWNLOAD bootstrap {}", encoded_id);
let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?;
let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?;
@ -127,7 +127,7 @@ impl Server {
// pub fn create_wallet_record(&self, bootstrap: &Bootstrap) {
// let wallet = WalletRecord::create(&bootstrap.id(), bootstrap, &self.store).unwrap();
// println!(
// log_debug!(
// "wallet created {}",
// base64_url::encode(&wallet.id().slice())
// );
@ -135,7 +135,7 @@ impl Server {
// pub fn open_wallet_record(&self, wallet_id: &WalletId) -> WalletRecord {
// let wallet2 = WalletRecord::open(wallet_id, &self.store).unwrap();
// println!(
// log_debug!(
// "wallet opened {}",
// base64_url::encode(&wallet2.id().slice())
// );
@ -155,7 +155,7 @@ async fn main() {
dir.push(path_str);
// FIXME: use a real key for encryption at rest
let key: [u8; 32] = [0; 32];
println!("{}", dir.to_str().unwrap());
log_debug!("data directory: {}", dir.to_str().unwrap());
fs::create_dir_all(dir.clone()).unwrap();
let store = LmdbKCVStore::open(&dir, key);
@ -209,7 +209,7 @@ async fn main() {
}
#[cfg(debug_assertions)]
{
println!("ANY ORIGIN");
log_debug!("CORS: any origin");
cors = cors.allow_any_origin();
}
log::info!("Starting server on http://localhost:3030");

@ -8,7 +8,6 @@ description = "P2P Broker module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" }
p2p-client-ws = { path = "../p2p-client-ws" }
@ -20,9 +19,16 @@ serde_bytes = "0.11.7"
async-std = { version = "1.12.0", features = ["attributes"] }
futures = "0.3.24"
rust-fsm = "0.6.0"
getrandom = "0.2.7"
async-channel = "1.7.1"
tempfile = "3"
hex = "0.4.3"
async-trait = "0.1.64"
async-tungstenite = { version = "0.17.2", features = ["async-std-runtime"] }
blake3 = "1.3.1"
[target.'cfg(target_arch = "wasm32")'.dependencies.getrandom]
version = "0.2.7"
features = ["js"]
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
getrandom = "0.2.7"

@ -1,3 +1,7 @@
pub mod broker_store;
pub mod server_ws;
pub mod types;
pub mod utils;

@ -23,7 +23,6 @@ use crate::broker_store::overlay::Overlay;
use crate::broker_store::peer::Peer;
use crate::broker_store::repostoreinfo::RepoStoreInfo;
use async_std::task;
use debug_print::*;
use futures::future::BoxFuture;
use futures::future::OptionFuture;
use futures::FutureExt;
@ -31,6 +30,7 @@ use futures::Stream;
use p2p_net::actors::*;
use p2p_net::errors::*;
use p2p_net::types::*;
use p2p_repo::log::*;
use p2p_repo::object::Object;
use p2p_repo::store::RepoStore;
use p2p_repo::store::StorageError;
@ -119,7 +119,7 @@ impl ProtocolHandler {
Result<Vec<u8>, ProtocolError>,
OptionFuture<BoxFuture<'static, u16>>,
) {
//debug_println!("SERVER PROTOCOL {:?}", &self.protocol);
//log_debug!("SERVER PROTOCOL {:?}", &self.protocol);
match &self.protocol {
ProtocolType::Start => {
let message = serde_bare::from_slice::<StartProtocol>(&frame);
@ -636,9 +636,9 @@ impl BrokerServer {
// // we need to open/create it
// // first let's find it in the KCVStore.overlay table to retrieve its repo_pubkey
// debug_println!("searching for overlayId {}", overlay_id);
// log_debug!("searching for overlayId {}", overlay_id);
// let overlay = Overlay::open(overlay_id, &self.store)?;
// debug_println!("found overlayId {}", overlay_id);
// log_debug!("found overlayId {}", overlay_id);
// let repo_id = overlay.repo()?;
// let repostore_id = RepoStoreId::Repo(repo_id);
// let mut writer = self
@ -676,7 +676,7 @@ impl BrokerServer {
user_id: PubKey,
sig: Sig,
) -> Result<(), ProtocolError> {
debug_println!("ADDING USER {}", user_id);
log_debug!("ADDING USER {}", user_id);
// TODO add is_admin boolean
// TODO check that admin_user is indeed an admin
@ -855,7 +855,7 @@ impl BrokerServer {
}
// TODO use a task to send non blocking (streaming)
let o = obj.ok().unwrap();
//debug_println!("{} BLOCKS ", o.blocks().len());
//log_debug!("{} BLOCKS ", o.blocks().len());
let mut deduplicated: HashSet<BlockId> = HashSet::new();
for block in o.blocks() {
let id = block.id();
@ -878,9 +878,9 @@ impl BrokerServer {
known_heads: &Vec<ObjectId>,
known_commits: &BloomFilter,
) -> Result<async_channel::Receiver<Block>, ProtocolError> {
//debug_println!("heads {:?}", heads);
//debug_println!("known_heads {:?}", known_heads);
//debug_println!("known_commits {:?}", known_commits);
//log_debug!("heads {:?}", heads);
//log_debug!("known_heads {:?}", known_heads);
//log_debug!("known_commits {:?}", known_commits);
self.get_repostore_from_overlay_id(&overlay, |store| {
let (s, r) = async_channel::unbounded::<Block>();
@ -889,7 +889,7 @@ impl BrokerServer {
.map_err(|e| ProtocolError::ObjectParseError)?;
// todo, use a task to send non blocking (streaming)
debug_println!("SYNCING {} COMMITS", res.len());
log_debug!("SYNCING {} COMMITS", res.len());
let mut deduplicated: HashSet<BlockId> = HashSet::new();
@ -925,7 +925,7 @@ impl BrokerServer {
peers: &Vec<PeerAdvert>,
) -> Result<(), ProtocolError> {
// check if this overlay already exists
//debug_println!("SEARCHING OVERLAY");
//log_debug!("SEARCHING OVERLAY");
let overlay_res = Overlay::open(&overlay_id, &self.store);
let overlay = match overlay_res {
Err(StorageError::NotFound) => {
@ -949,24 +949,24 @@ impl BrokerServer {
let key = SymKey::ChaCha20Key(random_buf);
let _ = RepoStoreInfo::create(&overlay_id, &key, &self.store)?; // TODO in case of error, delete the previously created Overlay
//debug_println!("KEY ADDED");
//log_debug!("KEY ADDED");
over
}
Err(e) => return Err(e.into()),
Ok(overlay) => overlay,
};
//debug_println!("OVERLAY FOUND");
//log_debug!("OVERLAY FOUND");
// add the peers to the overlay
for advert in peers {
Peer::update_or_create(advert, &self.store)?;
overlay.add_peer(&advert.peer())?;
}
//debug_println!("PEERS ADDED");
//log_debug!("PEERS ADDED");
// now adding the overlay_id to the account
let account = Account::open(&user, &self.store)?; // TODO in case of error, delete the previously created Overlay
account.add_overlay(&overlay_id)?;
//debug_println!("USER <-> OVERLAY");
//log_debug!("USER <-> OVERLAY");
//TODO: connect to peers

@ -17,21 +17,22 @@ use async_std::sync::Mutex;
use async_std::task;
use async_tungstenite::accept_async;
use async_tungstenite::tungstenite::protocol::Message;
use debug_print::*;
use futures::{SinkExt, StreamExt};
use p2p_client_ws::remote_ws::ConnectionWebSocket;
use p2p_net::broker::*;
use p2p_net::connection::IAccept;
use p2p_net::types::IP;
use p2p_net::utils::Sensitive;
use p2p_repo::log::*;
use p2p_repo::types::{PrivKey, PubKey};
use p2p_repo::utils::generate_keypair;
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use std::fs;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::{thread, time};
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use tempfile::Builder;
pub async fn accept(tcp: TcpStream, peer_priv_key: Sensitive<[u8; 32]>, peer_pub_key: PubKey) {
@ -63,7 +64,7 @@ pub async fn run_server_accept_one(
let root = tempfile::Builder::new().prefix("ngd").tempdir().unwrap();
let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("data directory: {}", root.path().to_str().unwrap());
let store = LmdbKCVStore::open(root.path(), master_key);
// TODO: remove this part
@ -72,7 +73,7 @@ pub async fn run_server_accept_one(
// let server_arc = Arc::new(server);
let socket = TcpListener::bind(addrs.as_str()).await?;
debug_println!("Listening on {}", addrs.as_str());
log_debug!("Listening on {}", addrs.as_str());
let mut connections = socket.incoming();
let tcp = connections.next().await.unwrap()?;
@ -87,13 +88,18 @@ pub async fn run_server(
port: u16,
peer_priv_key: Sensitive<[u8; 32]>,
peer_pub_key: PubKey,
mut path: PathBuf,
) -> std::io::Result<()> {
let addrs = format!("{}:{}", addr, port);
let root = tempfile::Builder::new().prefix("ngd").tempdir().unwrap();
//let root = tempfile::Builder::new().prefix("ngd").tempdir().unwrap();
path.push("storage");
std::fs::create_dir_all(path.clone()).unwrap();
//log::info!("Home directory is {}");
let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let store = LmdbKCVStore::open(root.path(), master_key);
let store = LmdbKCVStore::open(&path, master_key);
// TODO: remove this part
// let server: BrokerServer =
@ -101,7 +107,7 @@ pub async fn run_server(
// let server_arc = Arc::new(server);
let socket = TcpListener::bind(addrs.as_str()).await?;
debug_println!("Listening on {}", addrs.as_str());
log_debug!("Listening on {}", addrs.as_str());
let mut connections = socket.incoming();
while let Some(tcp) = connections.next().await {

@ -0,0 +1,92 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_net::types::{BindAddress, BrokerServerV0, OverlayId, UserId, IP};
use p2p_repo::types::PrivKey;
use serde::{Deserialize, Serialize};
/// AcceptForwardForV0 type
///
/// Allows answers to connection requests originating from a client behind a reverse proxy.
/// The format of the last param in each tuple is a list of comma-separated hosts or CIDR subnetworks (IPv4 and/or IPv6 addresses) accepted as X-Forwarded-For.
/// An empty string means all addresses are accepted.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub enum AcceptForwardForV0 {
    /// X-Forwarded-For not allowed
    No,
    /// X-Forwarded-For accepted only for clients with private LAN addresses. First param is the bind address of the proxy server
    Private((BindAddress, String)),
    /// X-Forwarded-For accepted only for clients with public addresses. First param is the domain of the proxy server.
    /// The domain can take an optional port with a trailing `:port`
    PublicDomain((String, String)),
    /// X-Forwarded-For accepted only for clients with public addresses. First param is the domain of the proxy server.
    /// The domain can take an optional port with a trailing `:port`.
    /// Second param is the privKey of the PeerId of the proxy server, useful when the proxy server is load balancing to several daemons
    /// that should all use the same PeerId to answer requests
    PublicDomainPeer((String, PrivKey, String)),
    /// First param is the port; second param in the tuple is the interval for a periodic probe of the external IP
    PublicDyn((u16, u32, String)),
    /// NOTE(review): presumably a statically-known public bind address — confirm intended semantics
    PublicStatic((BindAddress, String)),
}
/// DaemonConfig Listener Version 0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ListenerV0 {
    /// Local interface name to bind to.
    /// Names of interfaces can be retrieved with the --list-interfaces option.
    /// The string can take an optional trailing option of the form `:3600`, the number of seconds
    /// of the interval for a periodic refresh of the actual IP(s) of the interface. Used for dynamic-IP interfaces.
    pub interface_name: String,
    /// Whether to bind to the IPv6 address of the interface
    pub ipv6: bool,
    /// local port to listen on
    pub port: u16,
    /// Will answer a probe coming from the private LAN and, if is_private, with its own peerId,
    /// so that guests on the network will be able to connect.
    pub discoverable: bool,
    /// Answers to connection requests originating from a direct client, without X-Forwarded-For headers.
    /// Can be used in combination with an accept_forward_for config, when a local daemon is behind a proxy, and also serves as broker for local apps/webbrowsers
    pub accept_direct: bool,
    /// X-Forwarded-For config. only valid if IP/interface is localhost or private
    pub accept_forward_for: AcceptForwardForV0,
    // NOTE(review): design notes kept from the original author:
    // impl fn is_private()
    // returns false if public IP in interface, or if PublicDyn, PublicStatic
    // if the ip is local or private, and the forwarding is not PublicDyn nor PublicStatic, (if is_private) then the app is served on HTTP get of /
    // an interface with no accept_forward_for and no accept_direct, is de facto, disabled
}
/// Broker Overlay Permission
///
/// Who is allowed to join/use an overlay on this broker.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum BrokerOverlayPermission {
    /// Access denied to everybody
    Nobody,
    /// Open to anybody
    Anybody,
    /// Any user registered on this broker
    AllRegisteredUser,
    /// Only the users explicitly listed
    UsersList(Vec<UserId>),
}
/// Broker Overlay Config
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BrokerOverlayConfig {
    /// List of overlays this config applies to. An empty array means applying to all.
    pub overlays: Vec<OverlayId>,
    /// Who can ask to join an overlay on the core protocol
    pub core: BrokerOverlayPermission,
    /// Who can connect as a client to this server
    pub server: BrokerOverlayPermission,
    // if core == Nobody and server == Nobody then the listeners will not be started

    /// Are ExtRequest allowed on the server? This requires the core to be ON.
    pub allow_read: bool,

    /// An empty list means to forward to the peer known for each overlay.
    /// forward becomes the default when core is disabled
    pub forward: Vec<BrokerServerV0>,
}

@ -0,0 +1,31 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_repo::log::*;
/// Derive the broker's four keys from a single 32-byte master key.
///
/// When `key` is `None`, a fresh random master key is generated (with a
/// warning logged). The three derived keys are produced deterministically
/// from the master key with BLAKE3 key derivation, using distinct context
/// strings.
///
/// Returns `[master, peerid_privkey, wallet_encryption, config_signature]`.
pub fn gen_broker_keys(key: Option<[u8; 32]>) -> [[u8; 32]; 4] {
    let master = key.unwrap_or_else(|| {
        let mut random_key = [0u8; 32];
        log_warn!("gen_broker_keys: No key provided, generating one");
        getrandom::getrandom(&mut random_key).expect("getrandom failed");
        random_key
    });
    [
        master,
        blake3::derive_key("NextGraph Broker BLAKE3 key PeerId privkey", &master),
        blake3::derive_key("NextGraph Broker BLAKE3 key wallet encryption", &master),
        blake3::derive_key("NextGraph Broker BLAKE3 key config signature", &master),
    ]
}

@ -8,7 +8,6 @@ description = "P2P Client module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" }
chacha20 = "0.9.0"

@ -17,7 +17,7 @@ macro_rules! before {
.map_err(|_e| ProtocolError::ActorError)?;
let $request_id = $addr.actor_id();
//debug_println!("actor ID {}", $request_id);
//log_debug!("actor ID {}", $request_id);
{
let mut map = $self.actors.write().expect("RwLock poisoned");
@ -28,7 +28,7 @@ macro_rules! before {
macro_rules! after {
( $self:expr, $request_id:ident, $addr:ident, $receiver:ident, $reply:ident ) => {
//debug_println!("waiting for reply");
//log_debug!("waiting for reply");
$addr.wait_for_stop().await; // TODO add timeout and close connection if there's no reply
let r = $receiver.await;
@ -36,7 +36,7 @@ macro_rules! after {
return Err(ProtocolError::Closing);
}
let $reply = r.unwrap();
//debug_println!("reply arrived {:?}", $reply);
//log_debug!("reply arrived {:?}", $reply);
{
let mut map = $self.actors.write().expect("RwLock poisoned");
map.remove(&$request_id);

@ -17,7 +17,6 @@ use async_std::net::TcpStream;
use async_tungstenite::tungstenite::protocol::frame::coding::CloseCode;
use async_tungstenite::tungstenite::protocol::CloseFrame;
use async_tungstenite::WebSocketStream;
use debug_print::*;
use async_std::sync::Mutex;
use futures::io::Close;
@ -26,10 +25,10 @@ use futures::{FutureExt, SinkExt};
use async_std::task;
use p2p_net::errors::*;
use p2p_net::log;
use p2p_net::types::*;
use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend, Sender, Sensitive};
use p2p_net::{connection::*, WS_PORT};
use p2p_repo::log::*;
use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp};
@ -57,7 +56,7 @@ impl IConnect for ConnectionWebSocket {
match (res) {
Err(e) => {
debug_println!("Cannot connect: {:?}", e);
log_debug!("Cannot connect: {:?}", e);
Err(NetError::ConnectionError)
}
Ok((mut websocket, _)) => {
@ -67,14 +66,14 @@ impl IConnect for ConnectionWebSocket {
let mut shutdown = cnx.set_shutdown();
let join = task::spawn(async move {
debug_println!("START of WS loop");
log_debug!("START of WS loop");
let res = ws_loop(websocket, s, r).await;
if res.is_err() {
let _ = shutdown.send(res.err().unwrap()).await;
}
debug_println!("END of WS loop");
log_debug!("END of WS loop");
});
cnx.start(config).await;
@ -103,14 +102,14 @@ impl IAccept for ConnectionWebSocket {
let mut shutdown = cnx.set_shutdown();
let join = task::spawn(async move {
debug_println!("START of WS loop");
log_debug!("START of WS loop");
let res = ws_loop(socket, s, r).await;
if res.is_err() {
let _ = shutdown.send(res.err().unwrap()).await;
}
debug_println!("END of WS loop");
log_debug!("END of WS loop");
});
Ok(cnx)
}
@ -122,7 +121,7 @@ async fn close_ws(
code: u16,
reason: &str,
) -> Result<(), NetError> {
log!("close_ws {:?}", code);
log_info!("close_ws {:?}", code);
let cmd = if code == 1000 {
ConnectionCommand::Close
@ -133,7 +132,7 @@ async fn close_ws(
} else {
ConnectionCommand::Error(NetError::try_from(code - 4949).unwrap())
};
log!("sending to read loop {:?}", cmd);
log_info!("sending to read loop {:?}", cmd);
let _ = futures::SinkExt::send(receiver, cmd).await;
stream
@ -162,11 +161,11 @@ async fn ws_loop(
select! {
r = stream.next().fuse() => match r {
Some(Ok(msg)) => {
//log!("GOT MESSAGE {:?}", msg);
//log_info!("GOT MESSAGE {:?}", msg);
if msg.is_close() {
if let Message::Close(Some(cf)) = msg {
log!("CLOSE from remote with closeframe: {}",cf.reason);
log_info!("CLOSE from remote with closeframe: {}",cf.reason);
let last_command = match cf.code {
CloseCode::Normal =>
ConnectionCommand::Close,
@ -185,7 +184,7 @@ async fn ws_loop(
}
else {
let _ = futures::SinkExt::send(receiver, ConnectionCommand::Close).await;
log!("CLOSE from remote");
log_info!("CLOSE from remote");
}
return Ok(ProtocolError::Closing);
} else {
@ -193,12 +192,12 @@ async fn ws_loop(
.map_err(|_e| NetError::IoError)?;
}
},
Some(Err(e)) => {log!("GOT ERROR {:?}",e);return Err(NetError::WsError);},
Some(Err(e)) => {log_info!("GOT ERROR {:?}",e);return Err(NetError::WsError);},
None => break
},
s = sender.next().fuse() => match s {
Some(msg) => {
//log!("SENDING MESSAGE {:?}", msg);
//log_info!("SENDING MESSAGE {:?}", msg);
match msg {
ConnectionCommand::Msg(m) => {
futures::SinkExt::send(&mut stream,Message::binary(serde_bare::to_vec(&m)?)).await.map_err(|_e| NetError::IoError)?;
@ -223,7 +222,7 @@ async fn ws_loop(
match inner_loop(&mut ws, sender, &mut receiver).await {
Ok(proto_err) => {
if proto_err == ProtocolError::Closing {
log!("ProtocolError::Closing");
log_info!("ProtocolError::Closing");
let _ = ws.close(None).await;
} else if proto_err == ProtocolError::NoError {
close_ws(&mut ws, &mut receiver, 1000, "").await?;
@ -259,7 +258,7 @@ mod test {
use p2p_net::errors::NetError;
use p2p_net::types::IP;
use p2p_net::utils::{spawn_and_log_error, ResultSend};
use p2p_net::{log, sleep};
use p2p_repo::log::*;
use p2p_repo::utils::generate_keypair;
use std::net::IpAddr;
use std::str::FromStr;
@ -280,7 +279,7 @@ mod test {
let (client_priv_key, client_pub_key) = generate_keypair();
let (user_priv_key, user_pub_key) = generate_keypair();
log!("start connecting");
log_info!("start connecting");
{
let res = BROKER
.write()
@ -299,7 +298,7 @@ mod test {
}),
)
.await;
log!("broker.connect : {:?}", res);
log_info!("broker.connect : {:?}", res);
res.expect("assume the connection succeeds");
}
@ -308,7 +307,7 @@ mod test {
async fn timer_close(remote_peer_id: DirectPeerId) -> ResultSend<()> {
async move {
sleep!(std::time::Duration::from_secs(3));
log!("timeout");
log_info!("timeout");
BROKER
.write()
.await

@ -15,10 +15,10 @@ use futures::FutureExt;
use futures::{future, pin_mut, select, stream, SinkExt, StreamExt};
use p2p_net::connection::*;
use p2p_net::errors::*;
use p2p_net::log;
use p2p_net::types::*;
use p2p_net::utils::*;
use p2p_net::WS_PORT;
use p2p_repo::log::*;
use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp};
use std::sync::Arc;
@ -47,7 +47,7 @@ impl IConnect for ConnectionWebSocket {
let url = format!("ws://{}:{}", ip, WS_PORT);
let (mut ws, wsio) = WsMeta::connect(url, None).await.map_err(|e| {
//log!("{:?}", e);
//log_info!("{:?}", e);
NetError::ConnectionError
})?;
@ -85,7 +85,7 @@ async fn ws_loop(
select! {
r = stream.next().fuse() => match r {
Some(msg) => {
log!("GOT MESSAGE {:?}", msg);
log_info!("GOT MESSAGE {:?}", msg);
if let WsMessage::Binary(b) = msg {
receiver.send(ConnectionCommand::Msg(serde_bare::from_slice::<ProtocolMessage>(&b)?)).await
.map_err(|_e| NetError::IoError)?;
@ -98,11 +98,11 @@ async fn ws_loop(
},
s = sender.next().fuse() => match s {
Some(msg) => {
log!("SENDING MESSAGE {:?}", msg);
log_info!("SENDING MESSAGE {:?}", msg);
match msg {
ConnectionCommand::Msg(m) => {
stream.send(WsMessage::Binary(serde_bare::to_vec(&m)?)).await.map_err(|e| { log!("{:?}",e); return NetError::IoError;})?;
stream.send(WsMessage::Binary(serde_bare::to_vec(&m)?)).await.map_err(|e| { log_info!("{:?}",e); return NetError::IoError;})?;
},
ConnectionCommand::Error(e) => {
@ -122,7 +122,7 @@ async fn ws_loop(
}
Ok(ProtocolError::NoError)
}
log!("START of WS loop");
log_info!("START of WS loop");
let mut events = ws
.observe(ObserveConfig::default())
//.observe(Filter::Pointer(WsEvent::is_closed).into())
@ -132,9 +132,9 @@ async fn ws_loop(
Ok(proto_err) => {
if proto_err == ProtocolError::NoError {
let _ = ws.close_code(1000).await; //.map_err(|_e| NetError::WsError)?;
log!("CLOSED GRACEFULLY");
log_info!("CLOSED GRACEFULLY");
} else {
log!("PROTOCOL ERR");
log_info!("PROTOCOL ERR");
let mut code = proto_err.clone() as u16;
if code > 949 {
code = ProtocolError::OtherError as u16;
@ -150,12 +150,12 @@ async fn ws_loop(
.await;
//.map_err(|_e| NetError::WsError)?;
//return Err(Box::new(e));
log!("ERR {:?}", e);
log_info!("ERR {:?}", e);
}
}
let last_event = events.next().await;
log!("WS closed {:?}", last_event.clone());
log_info!("WS closed {:?}", last_event.clone());
let last_command = match last_event {
None => ConnectionCommand::Close,
Some(WsEvent::Open) => ConnectionCommand::Error(NetError::WsError), // this should never happen
@ -185,6 +185,6 @@ async fn ws_loop(
.await
.map_err(|_e| NetError::IoError)?;
log!("END of WS loop");
log_info!("END of WS loop");
Ok(())
}

@ -8,7 +8,6 @@ description = "P2P network module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
@ -26,9 +25,6 @@ noise-protocol = "0.2.0-rc1"
noise-rust-crypto = "0.6.0-rc.1"
ed25519-dalek = "1.0.1"
[target.'cfg(target_arch = "wasm32")'.dependencies]
gloo-timers = "0.2.6"
[target.'cfg(target_arch = "wasm32")'.dependencies.getrandom]
version = "0.2.7"
features = ["js"]

@ -18,7 +18,7 @@ use std::convert::From;
use std::sync::Arc;
use crate::utils::{spawn_and_log_error, Receiver, ResultSend, Sender};
use crate::{connection::*, errors::ProtocolError, log, types::ProtocolMessage};
use crate::{connection::*, errors::ProtocolError, types::ProtocolMessage};
use std::marker::PhantomData;
impl TryFrom<ProtocolMessage> for () {

@ -15,7 +15,6 @@ use crate::errors::*;
use crate::types::*;
use crate::utils::spawn_and_log_error;
use crate::utils::{Receiver, ResultSend, Sender};
use crate::{log, sleep};
use async_std::stream::StreamExt;
use async_std::sync::{Arc, RwLock};
use futures::channel::mpsc;
@ -23,6 +22,7 @@ use futures::SinkExt;
use noise_protocol::U8Array;
use noise_rust_crypto::sensitive::Sensitive;
use once_cell::sync::Lazy;
use p2p_repo::log::*;
use p2p_repo::object::Object;
use p2p_repo::object::ObjectParseError;
use p2p_repo::store::HashMapRepoStore;
@ -96,7 +96,7 @@ impl Broker {
// TODO
let (mut tx, rx) = mpsc::unbounded::<Block>();
//log!("cur {}", std::env::current_dir().unwrap().display());
//log_info!("cur {}", std::env::current_dir().unwrap().display());
//Err(ProtocolError::AccessDenied)
// let f = std::fs::File::open(
@ -168,10 +168,10 @@ impl Broker {
.unwrap();
async fn send(mut tx: Sender<Commit>, commit: Commit) -> ResultSend<()> {
while let Ok(_) = tx.send(commit.clone()).await {
log!("sending");
log_info!("sending");
sleep!(std::time::Duration::from_secs(3));
}
log!("end of sending");
log_info!("end of sending");
Ok(())
}
spawn_and_log_error(send(tx.clone(), commit));
@ -251,7 +251,7 @@ impl Broker {
async fn timer_shutdown(timeout: std::time::Duration) -> ResultSend<()> {
async move {
sleep!(timeout);
log!("timeout for shutdown");
log_info!("timeout for shutdown");
let _ = BROKER
.write()
.await
@ -334,8 +334,8 @@ impl Broker {
) -> ResultSend<()> {
async move {
let res = join.next().await;
log!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id);
log!("REMOVED");
log_info!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id);
log_info!("REMOVED");
BROKER.write().await.remove(&remote_peer_id);
}
.await;
@ -362,7 +362,7 @@ impl Broker {
// TODO check that not already connected to peer
// IpAddr::from_str("127.0.0.1");
log!("CONNECTING");
log_info!("CONNECTING");
let mut connection = cnx
.open(
ip,
@ -405,7 +405,7 @@ impl Broker {
) -> ResultSend<()> {
async move {
let res = join.next().await;
log!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id);
log_info!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id);
if res.is_some() {
// we intend to reconnect
let mut broker = BROKER.write().await;
@ -414,10 +414,10 @@ impl Broker {
// let result = broker
// .connect(cnx, ip, core, peer_pubk, peer_privk, remote_peer_id)
// .await;
// log!("SOCKET RECONNECTION {:?} {:?}", result, &remote_peer_id);
// log_info!("SOCKET RECONNECTION {:?} {:?}", result, &remote_peer_id);
// TODO: deal with error and incremental backoff
} else {
log!("REMOVED");
log_info!("REMOVED");
BROKER.write().await.remove(&remote_peer_id);
}
}
@ -454,10 +454,10 @@ impl Broker {
pub fn print_status(&self) {
self.peers.iter().for_each(|(peerId, peerInfo)| {
log!("PEER in BROKER {:?} {:?}", peerId, peerInfo);
log_info!("PEER in BROKER {:?} {:?}", peerId, peerInfo);
});
self.direct_connections.iter().for_each(|(ip, directCnx)| {
log!("direct_connection in BROKER {:?} {:?}", ip, directCnx)
log_info!("direct_connection in BROKER {:?} {:?}", ip, directCnx)
});
}
}

@ -14,27 +14,25 @@
//! This is the trait
//!
use futures::channel::mpsc;
use futures::{
ready,
ready, select,
stream::Stream,
task::{Context, Poll},
Future,
select, FutureExt,
Future, FutureExt,
};
use futures::channel::mpsc;
use std::pin::Pin;
use std::{collections::HashSet, fmt::Debug};
use crate::errors::*;
use crate::types::*;
use async_broadcast::{broadcast, Receiver};
use debug_print::*;
use futures::{pin_mut, stream, Sink, SinkExt, StreamExt};
use p2p_repo::log::*;
use p2p_repo::object::*;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use crate::errors::*;
use crate::types::*;
#[async_trait::async_trait]
pub trait BrokerConnection {
@ -88,8 +86,10 @@ pub trait BrokerConnection {
let overlay: OverlayId = match public {
true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
false => {
let key: [u8; blake3::OUT_LEN] =
blake3::derive_key("NextGraph OverlayId BLAKE3 key", repo_link.secret().slice());
let key: [u8; blake3::OUT_LEN] = blake3::derive_key(
"NextGraph OverlayId BLAKE3 key",
repo_link.secret().slice(),
);
let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
Digest::Blake3Digest32(*keyed_hash.as_bytes())
}
@ -105,7 +105,7 @@ pub trait BrokerConnection {
match res {
Err(e) => {
if e == ProtocolError::OverlayNotJoined {
debug_println!("OverlayNotJoined");
log_debug!("OverlayNotJoined");
let res2 = self
.process_overlay_request(
overlay,
@ -125,7 +125,7 @@ pub trait BrokerConnection {
Ok(()) => {}
}
debug_println!("OverlayConnectionClient ready");
log_debug!("OverlayConnectionClient ready");
Ok(overlay)
}
}
@ -143,7 +143,11 @@ impl<'a, T> OverlayConnectionClient<'a, T>
where
T: BrokerConnection,
{
pub fn create( broker: &'a mut T, overlay: OverlayId, repo_link: RepoLink) -> OverlayConnectionClient<'a, T> {
pub fn create(
broker: &'a mut T,
overlay: OverlayId,
repo_link: RepoLink,
) -> OverlayConnectionClient<'a, T> {
OverlayConnectionClient {
broker,
repo_link,
@ -155,8 +159,10 @@ where
let overlay: OverlayId = match public {
true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
false => {
let key: [u8; blake3::OUT_LEN] =
blake3::derive_key("NextGraph OverlayId BLAKE3 key", repo_link.secret().slice());
let key: [u8; blake3::OUT_LEN] = blake3::derive_key(
"NextGraph OverlayId BLAKE3 key",
repo_link.secret().slice(),
);
let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
Digest::Blake3Digest32(*keyed_hash.as_bytes())
}
@ -299,7 +305,7 @@ where
repo_pubkey,
repo_secret,
);
debug_println!("object has {} blocks", obj.blocks().len());
log_debug!("object has {} blocks", obj.blocks().len());
let mut deduplicated: HashSet<ObjectId> = HashSet::new();
for block in obj.blocks() {
let id = block.id();

@ -19,17 +19,16 @@ use crate::actor::{Actor, SoS};
use crate::actors::*;
use crate::errors::NetError;
use crate::errors::ProtocolError;
use crate::log;
use crate::types::*;
use crate::utils::*;
use async_std::stream::StreamExt;
use async_std::sync::Mutex;
use debug_print::debug_println;
use futures::{channel::mpsc, select, Future, FutureExt, SinkExt};
use noise_protocol::U8Array;
use noise_protocol::{patterns::noise_xk, CipherState, HandshakeState};
use noise_rust_crypto::sensitive::Sensitive;
use noise_rust_crypto::*;
use p2p_repo::log::*;
use p2p_repo::types::{PrivKey, PubKey};
use p2p_repo::utils::{sign, verify};
use serde_bare::from_slice;
@ -205,7 +204,7 @@ impl NoiseFSM {
}
pub async fn send(&mut self, msg: ProtocolMessage) -> Result<(), ProtocolError> {
log!("SENDING: {:?}", msg);
log_info!("SENDING: {:?}", msg);
if self.noise_cipher_state_enc.is_some() {
let cipher = self.encrypt(msg)?;
self.sender
@ -251,7 +250,7 @@ impl NoiseFSM {
}
}
if msg_opt.is_some() {
log!("RECEIVED: {:?}", msg_opt.as_ref().unwrap());
log_info!("RECEIVED: {:?}", msg_opt.as_ref().unwrap());
}
match self.state {
// TODO verify that ID is zero
@ -314,12 +313,12 @@ impl NoiseFSM {
let mut payload =
handshake.read_message_vec(noise.data()).map_err(|e| {
debug_println!("{:?}", e);
log_debug!("{:?}", e);
ProtocolError::NoiseHandshakeFailed
})?;
payload = handshake.write_message_vec(&payload).map_err(|e| {
debug_println!("{:?}", e);
log_debug!("{:?}", e);
ProtocolError::NoiseHandshakeFailed
})?;
@ -347,7 +346,7 @@ impl NoiseFSM {
.map_err(|e| ProtocolError::NoiseHandshakeFailed)?;
payload = handshake.write_message_vec(&payload).map_err(|e| {
debug_println!("{:?}", e);
log_debug!("{:?}", e);
ProtocolError::NoiseHandshakeFailed
})?;
@ -489,7 +488,7 @@ impl NoiseFSM {
if (result.is_err()) {
return Err(result);
}
log!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the server side");
log_info!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the server side");
self.state = FSMstate::AuthResult;
return Ok(StepReply::NONE);
}
@ -509,7 +508,7 @@ impl NoiseFSM {
self.state = FSMstate::AuthResult;
log!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the client side");
log_info!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the client side");
return Ok(StepReply::NONE);
}
@ -612,7 +611,7 @@ impl ConnectionBase {
ConnectionCommand::Close
| ConnectionCommand::Error(_)
| ConnectionCommand::ProtocolError(_) => {
log!("EXIT READ LOOP because : {:?}", msg);
log_info!("EXIT READ LOOP because : {:?}", msg);
break;
}
ConnectionCommand::Msg(proto_msg) => {
@ -677,7 +676,7 @@ impl ConnectionBase {
}
}
}
log!("END OF READ LOOP");
log_info!("END OF READ LOOP");
Ok(())
}
@ -720,7 +719,7 @@ impl ConnectionBase {
// }
pub async fn close(&mut self) {
log!("closing...");
log_info!("closing...");
self.send(ConnectionCommand::Close).await;
}
@ -772,8 +771,8 @@ mod test {
use crate::actor::*;
use crate::actors::*;
use crate::log;
use crate::types::*;
use p2p_repo::log::*;
use std::any::{Any, TypeId};
#[async_std::test]
@ -781,14 +780,14 @@ mod test {
#[async_std::test]
pub async fn test_typeid() {
log!(
log_info!(
"{:?}",
ClientHello::Noise3(Noise::V0(NoiseV0 { data: vec![] })).type_id()
);
let a = Noise::V0(NoiseV0 { data: [].to_vec() });
log!("{:?}", a.type_id());
log!("{:?}", TypeId::of::<Noise>());
log!("{:?}", ClientHello::Local.type_id());
log!("{:?}", TypeId::of::<ClientHello>());
log_info!("{:?}", a.type_id());
log_info!("{:?}", TypeId::of::<Noise>());
log_info!("{:?}", ClientHello::Local.type_id());
log_info!("{:?}", TypeId::of::<ClientHello>());
}
}

@ -8,6 +8,8 @@
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
#[macro_use]
extern crate p2p_repo;
pub mod types;
@ -28,51 +30,3 @@ pub mod utils;
pub mod tests;
pub static WS_PORT: u16 = 1025;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
// The `console.log` is quite polymorphic, so we can bind it with multiple
// signatures. Note that we need to use `js_name` to ensure we always call
// `log` in JS.
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_u32(a: u32);
// Multiple arguments too!
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_many(a: &str, b: &str);
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log {
// Note that this is using the `log` function imported above during
// `bare_bones`
($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
}
#[cfg(not(target_arch = "wasm32"))]
#[macro_export]
macro_rules! log {
($($t:tt)*) => (debug_print::debug_println!($($t)*))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (gloo_timers::future::sleep($($t)*).await)
}
#[cfg(not(target_arch = "wasm32"))]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (std::thread::sleep($($t)*))
}

@ -23,6 +23,37 @@ use crate::{actor::EActor, actors::*, errors::ProtocolError};
use p2p_repo::types::*;
use serde::{Deserialize, Serialize};
//
// Broker common types
//
/// Bind address
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BindAddress {
pub port: u16,
pub ip: IP,
}
/// BrokerServerTypeV0 type
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum BrokerServerTypeV0 {
Localhost(u16), // optional port number
BoxPrivate(Vec<BindAddress>),
BoxPublic(Vec<BindAddress>),
BoxPublicDyn(Vec<BindAddress>), // can be empty
Domain(String), // accepts an option trailing ":port" number
}
/// BrokerServer details Version 0
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BrokerServerV0 {
/// Network addresses
pub server_type: BrokerServerTypeV0,
/// peerId of the server
pub peer_id: PubKey,
}
//
// COMMON TYPES FOR MESSAGES
//

@ -9,13 +9,14 @@
* according to those terms.
*/
use crate::log;
use async_std::task;
use ed25519_dalek::*;
use futures::{channel::mpsc, select, Future, FutureExt, SinkExt};
pub use noise_protocol::U8Array;
use noise_protocol::DH;
pub use noise_rust_crypto::sensitive::Sensitive;
use p2p_repo::log::*;
use p2p_repo::types::PubKey;
#[cfg(target_arch = "wasm32")]
pub fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
@ -24,7 +25,7 @@ where
{
task::spawn_local(async move {
if let Err(e) = fut.await {
log!("EXCEPTION {}", e)
log_err!("EXCEPTION {}", e)
}
})
}
@ -41,7 +42,7 @@ where
{
task::spawn(async move {
if let Err(e) = fut.await {
eprintln!("{}", e)
log_err!("{}", e)
}
})
}
@ -49,6 +50,16 @@ where
pub type Sender<T> = mpsc::UnboundedSender<T>;
pub type Receiver<T> = mpsc::UnboundedReceiver<T>;
pub fn keys_from_bytes(secret_key: [u8; 32]) -> (Sensitive<[u8; 32]>, PubKey) {
let sk = SecretKey::from_bytes(&secret_key).unwrap();
let pk: PublicKey = (&sk).into();
let pub_key = PubKey::Ed25519PubKey(pk.to_bytes());
let priv_key = Sensitive::<[u8; 32]>::from_slice(&secret_key);
(priv_key, pub_key)
}
pub fn gen_keys() -> (Sensitive<[u8; 32]>, [u8; 32]) {
let pri = noise_rust_crypto::X25519::genkey();
let publ = noise_rust_crypto::X25519::pubkey(&pri);

@ -7,6 +7,10 @@ authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P repository module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[features]
server_log_output = []
[dependencies]
blake3 = "1.3.1"
chacha20 = "0.9.0"
@ -16,8 +20,15 @@ serde = { version = "1.0.142", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"
fastbloom-rs = "0.5.3"
debug_print = "1.0.0"
hex = "0.4.3"
futures = "0.3.24"
base64-url = "2.0.0"
web-time = "0.2.0"
wasm-bindgen = "0.2"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
debug_print = "1.0.0"
log = "0.4"
[target.'cfg(target_arch = "wasm32")'.dependencies]
gloo-timers = "0.2.6"

@ -11,7 +11,7 @@
//! Branch of a Repository
use debug_print::*;
use crate::log::*;
use std::collections::{HashMap, HashSet};
use fastbloom_rs::{BloomFilter as Filter, Membership};
@ -113,9 +113,9 @@ impl Branch {
their_filter: &BloomFilter,
store: &impl RepoStore,
) -> Result<Vec<ObjectId>, ObjectParseError> {
//debug_println!(">> sync_req");
//debug_println!(" our_heads: {:?}", our_heads);
//debug_println!(" their_heads: {:?}", their_heads);
//log_debug!(">> sync_req");
//log_debug!(" our_heads: {:?}", our_heads);
//log_debug!(" their_heads: {:?}", their_heads);
/// Load `Commit` `Object`s of a `Branch` from the `RepoStore` starting from the given `Object`,
/// and collect `ObjectId`s starting from `our_heads` towards `their_heads`
@ -126,12 +126,12 @@ impl Branch {
visited: &mut HashSet<ObjectId>,
missing: &mut HashSet<ObjectId>,
) -> Result<bool, ObjectParseError> {
//debug_println!(">>> load_branch: {}", cobj.id());
//log_debug!(">>> load_branch: {}", cobj.id());
let id = cobj.id();
// root has no deps
let is_root = cobj.is_root();
//debug_println!(" deps: {:?}", cobj.deps());
//log_debug!(" deps: {:?}", cobj.deps());
// check if this commit object is present in their_heads
let mut their_head_found = their_heads.contains(&id);
@ -172,7 +172,7 @@ impl Branch {
let mut visited = HashSet::new();
let their_head_found =
load_branch(&cobj, store, their_heads, &mut visited, &mut missing)?;
//debug_println!("<<< load_branch: {}", their_head_found);
//log_debug!("<<< load_branch: {}", their_head_found);
ours.extend(visited); // add if one of their_heads found
}
@ -181,15 +181,15 @@ impl Branch {
let cobj = Object::load(*id, None, store)?;
let mut visited = HashSet::new();
let their_head_found = load_branch(&cobj, store, &[], &mut visited, &mut missing)?;
//debug_println!("<<< load_branch: {}", their_head_found);
//log_debug!("<<< load_branch: {}", their_head_found);
theirs.extend(visited); // add if one of their_heads found
}
let mut result = &ours - &theirs;
//debug_println!("!! ours: {:?}", ours);
//debug_println!("!! theirs: {:?}", theirs);
//debug_println!("!! result: {:?}", result);
//log_debug!("!! ours: {:?}", ours);
//log_debug!("!! theirs: {:?}", theirs);
//log_debug!("!! result: {:?}", result);
// remove their_commits from result
let filter = Filter::from_u8_array(their_filter.f.as_slice(), their_filter.k.into());
@ -202,7 +202,7 @@ impl Branch {
}
}
}
//debug_println!("!! result filtered: {:?}", result);
//log_debug!("!! result filtered: {:?}", result);
Ok(Vec::from_iter(result))
}
}
@ -239,9 +239,9 @@ mod test {
repo_pubkey,
repo_secret,
);
println!(">>> add_obj");
println!(" id: {:?}", obj.id());
println!(" deps: {:?}", obj.deps());
log_debug!(">>> add_obj");
log_debug!(" id: {:?}", obj.id());
log_debug!(" deps: {:?}", obj.deps());
obj.save(store).unwrap();
obj.reference().unwrap()
}
@ -283,7 +283,7 @@ mod test {
expiry,
)
.unwrap();
//println!("commit: {:?}", commit);
//log_debug!("commit: {:?}", commit);
add_obj(
ObjectContent::Commit(commit),
obj_deps,
@ -303,7 +303,7 @@ mod test {
let deps = vec![];
let expiry = None;
let body = CommitBody::Branch(branch);
//println!("body: {:?}", body);
//log_debug!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
@ -323,7 +323,7 @@ mod test {
let expiry = None;
let content = [7u8; 777].to_vec();
let body = CommitBody::Transaction(Transaction::V0(content));
//println!("body: {:?}", body);
//log_debug!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
@ -342,7 +342,7 @@ mod test {
) -> ObjectRef {
let expiry = None;
let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body);
//log_debug!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
@ -359,12 +359,12 @@ mod test {
// repo
let repo_keypair: Keypair = Keypair::generate(&mut rng);
println!(
log_debug!(
"repo private key: ({}) {:?}",
repo_keypair.secret.as_bytes().len(),
repo_keypair.secret.as_bytes()
);
println!(
log_debug!(
"repo public key: ({}) {:?}",
repo_keypair.public.as_bytes().len(),
repo_keypair.public.as_bytes()
@ -376,11 +376,11 @@ mod test {
// branch
let branch_keypair: Keypair = Keypair::generate(&mut rng);
println!("branch public key: {:?}", branch_keypair.public.as_bytes());
log_debug!("branch public key: {:?}", branch_keypair.public.as_bytes());
let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes());
let member_keypair: Keypair = Keypair::generate(&mut rng);
println!("member public key: {:?}", member_keypair.public.as_bytes());
log_debug!("member public key: {:?}", member_keypair.public.as_bytes());
let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes());
let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes());
@ -404,19 +404,19 @@ mod test {
tags,
metadata,
);
//println!("branch: {:?}", branch);
//log_debug!("branch: {:?}", branch);
fn print_branch() {
println!("branch deps/acks:");
println!("");
println!(" br");
println!(" / \\");
println!(" t1 t2");
println!(" / \\ / \\");
println!(" a3 t4<--t5-->(t1)");
println!(" / \\");
println!(" a6 a7");
println!("");
log_debug!("branch deps/acks:");
log_debug!("");
log_debug!(" br");
log_debug!(" / \\");
log_debug!(" t1 t2");
log_debug!(" / \\ / \\");
log_debug!(" a3 t4<--t5-->(t1)");
log_debug!(" / \\");
log_debug!(" a6 a7");
log_debug!("");
}
print_branch();
@ -434,7 +434,7 @@ mod test {
// create & add commits to store
println!(">> br");
log_debug!(">> br");
let br = add_commit(
branch_body,
member_privkey,
@ -448,7 +448,7 @@ mod test {
&mut store,
);
println!(">> t1");
log_debug!(">> t1");
let t1 = add_commit(
branch_body,
member_privkey,
@ -462,7 +462,7 @@ mod test {
&mut store,
);
println!(">> t2");
log_debug!(">> t2");
let t2 = add_commit(
branch_body,
member_privkey,
@ -476,7 +476,7 @@ mod test {
&mut store,
);
println!(">> a3");
log_debug!(">> a3");
let a3 = add_commit(
branch_body,
member_privkey,
@ -490,7 +490,7 @@ mod test {
&mut store,
);
println!(">> t4");
log_debug!(">> t4");
let t4 = add_commit(
branch_body,
member_privkey,
@ -504,7 +504,7 @@ mod test {
&mut store,
);
println!(">> t5");
log_debug!(">> t5");
let t5 = add_commit(
branch_body,
member_privkey,
@ -518,7 +518,7 @@ mod test {
&mut store,
);
println!(">> a6");
log_debug!(">> a6");
let a6 = add_commit(
branch_body,
member_privkey,
@ -532,7 +532,7 @@ mod test {
&mut store,
);
println!(">> a7");
log_debug!(">> a7");
let a7 = add_commit(
branch_body,
member_privkey,
@ -562,10 +562,10 @@ mod test {
};
print_branch();
println!(">> sync_req");
println!(" our_heads: [a3, t5, a6, a7]");
println!(" their_heads: [a3, t5]");
println!(" their_commits: [br, t1, t2, a3, t5, a6]");
log_debug!(">> sync_req");
log_debug!(" our_heads: [a3, t5, a6, a7]");
log_debug!(" their_heads: [a3, t5]");
log_debug!(" their_commits: [br, t1, t2, a3, t5, a6]");
let ids = Branch::sync_req(
&[a3.id, t5.id, a6.id, a7.id],

@ -11,12 +11,12 @@
//! Commit
use debug_print::*;
use ed25519_dalek::*;
use std::collections::HashSet;
use std::iter::FromIterator;
use crate::log::*;
use crate::object::*;
use crate::store::*;
use crate::types::*;
@ -271,7 +271,7 @@ impl Commit {
/// Verify if the commit's `body` and dependencies (`deps` & `acks`) are available in the `store`
pub fn verify_deps(&self, store: &impl RepoStore) -> Result<Vec<ObjectId>, CommitLoadError> {
//debug_println!(">> verify_deps: #{}", self.seq());
//log_debug!(">> verify_deps: #{}", self.seq());
/// Load `Commit`s of a `Branch` from the `RepoStore` starting from the given `Commit`,
/// and collect missing `ObjectId`s
fn load_branch(
@ -280,7 +280,7 @@ impl Commit {
visited: &mut HashSet<ObjectId>,
missing: &mut HashSet<ObjectId>,
) -> Result<(), CommitLoadError> {
//debug_println!(">>> load_branch: #{}", commit.seq());
//log_debug!(">>> load_branch: #{}", commit.seq());
// the commit verify_deps() was called on may not have an ID set,
// but the commits loaded from store should have it
match commit.id() {
@ -302,7 +302,7 @@ impl Commit {
}
Err(e) => return Err(e),
};
debug_println!("!!! is_root: {}", is_root);
log_debug!("!!! is_root: {}", is_root);
// load deps
if !is_root {
@ -360,12 +360,12 @@ mod test {
pub fn test_commit() {
let mut csprng = OsRng {};
let keypair: Keypair = Keypair::generate(&mut csprng);
println!(
log_debug!(
"private key: ({}) {:?}",
keypair.secret.as_bytes().len(),
keypair.secret.as_bytes()
);
println!(
log_debug!(
"public key: ({}) {:?}",
keypair.public.as_bytes().len(),
keypair.public.as_bytes()
@ -392,7 +392,7 @@ mod test {
priv_key, pub_key, seq, branch, deps, acks, refs, metadata, body_ref, expiry,
)
.unwrap();
println!("commit: {:?}", commit);
log_debug!("commit: {:?}", commit);
let store = HashMapRepoStore::new();
let metadata = [66u8; 64].to_vec();
@ -415,9 +415,9 @@ mod test {
tags,
metadata,
);
//println!("branch: {:?}", branch);
//log_debug!("branch: {:?}", branch);
let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body);
//log_debug!("body: {:?}", body);
match commit.load_body(&store) {
Ok(_b) => panic!("Body should not exist"),
@ -428,7 +428,7 @@ mod test {
}
let content = commit.content();
println!("content: {:?}", content);
log_debug!("content: {:?}", content);
commit.verify_sig().expect("Invalid signature");
commit

@ -19,3 +19,162 @@ pub mod errors;
pub mod kcv_store;
pub mod site;
pub mod log {
#[cfg(not(target_arch = "wasm32"))]
pub use debug_print::debug_println;
#[cfg(target_arch = "wasm32")]
pub use gloo_timers;
#[cfg(not(target_arch = "wasm32"))]
pub use log;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
#[wasm_bindgen(js_namespace = console)]
pub fn warn(s: &str);
#[wasm_bindgen(js_namespace = console)]
pub fn error(s: &str);
// The `console.log` is quite polymorphic, so we can bind it with multiple
// signatures. Note that we need to use `js_name` to ensure we always call
// `log` in JS.
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_u32(a: u32);
// Multiple arguments too!
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_many(a: &str, b: &str);
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_info {
($($t:tt)*) => (println!("INFO:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_err {
($($t:tt)*) => (println!("ERR:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_warn {
($($t:tt)*) => (println!("WARN:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => (debug_println!("DEBUG:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => (debug_println!("TRACE:{}",format!($($t)*)))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_info {
($($t:tt)*) => (log::info!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_err {
($($t:tt)*) => (log::error!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_warn {
($($t:tt)*) => (log::warn!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => (log::debug!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => (log::trace!($($t)*))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log_info {
($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log_err {
($($t:tt)*) => (error(&format_args!($($t)*).to_string()))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log_warn {
($($t:tt)*) => (warn(&format_args!($($t)*).to_string()))
}
#[cfg(all(debug_assertions, target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => (log(format!("DEBUG:{}",&format_args!($($t)*).to_string())))
}
#[cfg(all(debug_assertions, target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => (log(format!("TRACE:{}",&format_args!($($t)*).to_string())))
}
#[cfg(all(not(debug_assertions), target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => {};
}
#[cfg(all(not(debug_assertions), target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => {};
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (gloo_timers::future::sleep($($t)*).await)
}
#[cfg(not(target_arch = "wasm32"))]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (std::thread::sleep($($t)*))
}
pub use log_debug;
pub use log_err;
pub use log_info;
pub use log_trace;
pub use log_warn;
pub use sleep;
}

@ -13,11 +13,10 @@
use std::collections::{HashMap, HashSet};
use debug_print::*;
use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::ChaCha20;
use crate::log::*;
use crate::store::*;
use crate::types::*;
@ -101,9 +100,9 @@ impl Object {
cipher.apply_keystream(&mut content_enc_slice);
let key = SymKey::ChaCha20Key(key.clone());
let block = Block::new(children, deps, expiry, content_enc, Some(key));
//debug_println!(">>> make_block:");
//debug_println!("!! id: {:?}", obj.id());
//debug_println!("!! children: ({}) {:?}", children.len(), children);
//log_debug!(">>> make_block:");
//log_debug!("!! id: {:?}", obj.id());
//log_debug!("!! children: ({}) {:?}", children.len(), children);
block
}
@ -163,7 +162,7 @@ impl Object {
expiry,
));
}
//debug_println!("parents += {}", parents.len());
//log_debug!("parents += {}", parents.len());
if 1 < parents.len() {
let mut great_parents =
@ -194,7 +193,7 @@ impl Object {
) -> Object {
// create blocks by chunking + encrypting content
let valid_block_size = store_valid_value_size(block_size);
println!("valid_block_size {}", valid_block_size);
log_debug!("valid_block_size {}", valid_block_size);
let data_chunk_size = valid_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA;
let mut blocks: Vec<Block> = vec![];
@ -418,7 +417,7 @@ impl Object {
leaves: &mut Option<&mut Vec<Block>>,
obj_content: &mut Option<&mut Vec<u8>>,
) -> Result<(), ObjectParseError> {
/*debug_println!(
/*log_debug!(
">>> collect_leaves: #{}..{}",
parent_index,
parent_index + parents.len() - 1
@ -427,13 +426,13 @@ impl Object {
let mut i = parent_index;
for (id, key) in parents {
//debug_println!("!!! parent: #{}", i);
//log_debug!("!!! parent: #{}", i);
let block = &blocks[i];
i += 1;
// verify object ID
if *id != block.id() {
debug_println!("Invalid ObjectId.\nExp: {:?}\nGot: {:?}", *id, block.id());
log_debug!("Invalid ObjectId.\nExp: {:?}\nGot: {:?}", *id, block.id());
return Err(ObjectParseError::InvalidBlockId);
}
@ -455,7 +454,7 @@ impl Object {
match serde_bare::from_slice(content_dec.as_slice()) {
Ok(c) => content = c,
Err(e) => {
debug_println!("Block deserialize error: {}", e);
log_debug!("Block deserialize error: {}", e);
return Err(ObjectParseError::BlockDeserializeError);
}
}
@ -464,13 +463,13 @@ impl Object {
match content {
BlockContentV0::InternalNode(keys) => {
if keys.len() != b.children.len() {
debug_println!(
log_debug!(
"Invalid keys length: got {}, expected {}",
keys.len(),
b.children.len()
);
debug_println!("!!! children: {:?}", b.children);
debug_println!("!!! keys: {:?}", keys);
log_debug!("!!! children: {:?}", b.children);
log_debug!("!!! keys: {:?}", keys);
return Err(ObjectParseError::InvalidKeys);
}
@ -547,7 +546,7 @@ impl Object {
match serde_bare::from_slice(obj_content.as_slice()) {
Ok(c) => Ok(c),
Err(e) => {
debug_println!("Object deserialize error: {}", e);
log_debug!("Object deserialize error: {}", e);
Err(ObjectParseError::ObjectDeserializeError)
}
}
@ -609,18 +608,18 @@ mod test {
repo_secret,
);
println!("obj.id: {:?}", obj.id());
println!("obj.key: {:?}", obj.key());
println!("obj.blocks.len: {:?}", obj.blocks().len());
log_debug!("obj.id: {:?}", obj.id());
log_debug!("obj.key: {:?}", obj.key());
log_debug!("obj.blocks.len: {:?}", obj.blocks().len());
let mut i = 0;
for node in obj.blocks() {
println!("#{}: {:?}", i, node.id());
log_debug!("#{}: {:?}", i, node.id());
let mut file = std::fs::File::create(format!("tests/{}.ng", node.id()))
.expect("open block write file");
let ser_file = serde_bare::to_vec(node).unwrap();
file.write_all(&ser_file);
println!("{:?}", ser_file);
log_debug!("{:?}", ser_file);
i += 1;
}
@ -652,14 +651,14 @@ mod test {
repo_secret,
);
println!("obj.id: {:?}", obj.id());
println!("obj.key: {:?}", obj.key());
println!("obj.deps: {:?}", obj.deps());
println!("obj.blocks.len: {:?}", obj.blocks().len());
log_debug!("obj.id: {:?}", obj.id());
log_debug!("obj.key: {:?}", obj.key());
log_debug!("obj.deps: {:?}", obj.deps());
log_debug!("obj.blocks.len: {:?}", obj.blocks().len());
let mut i = 0;
for node in obj.blocks() {
println!("#{}: {:?}", i, node.id());
log_debug!("#{}: {:?}", i, node.id());
i += 1;
}
@ -677,13 +676,13 @@ mod test {
let obj2 = Object::load(obj.id(), obj.key(), &store).unwrap();
println!("obj2.id: {:?}", obj2.id());
println!("obj2.key: {:?}", obj2.key());
println!("obj2.deps: {:?}", obj2.deps());
println!("obj2.blocks.len: {:?}", obj2.blocks().len());
log_debug!("obj2.id: {:?}", obj2.id());
log_debug!("obj2.key: {:?}", obj2.key());
log_debug!("obj2.deps: {:?}", obj2.deps());
log_debug!("obj2.blocks.len: {:?}", obj2.blocks().len());
let mut i = 0;
for node in obj2.blocks() {
println!("#{}: {:?}", i, node.id());
log_debug!("#{}: {:?}", i, node.id());
i += 1;
}
@ -699,13 +698,13 @@ mod test {
let obj3 = Object::load(obj.id(), None, &store).unwrap();
println!("obj3.id: {:?}", obj3.id());
println!("obj3.key: {:?}", obj3.key());
println!("obj3.deps: {:?}", obj3.deps());
println!("obj3.blocks.len: {:?}", obj3.blocks().len());
log_debug!("obj3.id: {:?}", obj3.id());
log_debug!("obj3.key: {:?}", obj3.key());
log_debug!("obj3.deps: {:?}", obj3.deps());
log_debug!("obj3.blocks.len: {:?}", obj3.blocks().len());
let mut i = 0;
for node in obj3.blocks() {
println!("#{}: {:?}", i, node.id());
log_debug!("#{}: {:?}", i, node.id());
i += 1;
}
@ -743,7 +742,7 @@ mod test {
content: vec![],
}));
let empty_file_ser = serde_bare::to_vec(&empty_file).unwrap();
println!("empty file size: {}", empty_file_ser.len());
log_debug!("empty file size: {}", empty_file_ser.len());
let size = store_max_value_size()
- EMPTY_BLOCK_SIZE
@ -751,7 +750,7 @@ mod test {
- BLOCK_ID_SIZE * deps.len()
- empty_file_ser.len()
- DATA_VARINT_EXTRA;
println!("file size: {}", size);
log_debug!("file size: {}", size);
let content = ObjectContent::File(File::V0(FileV0 {
content_type: "".into(),
@ -759,7 +758,7 @@ mod test {
content: vec![99; size],
}));
let content_ser = serde_bare::to_vec(&content).unwrap();
println!("content len: {}", content_ser.len());
log_debug!("content len: {}", content_ser.len());
let expiry = Some(2u32.pow(31));
let max_object_size = store_max_value_size();
@ -776,18 +775,18 @@ mod test {
repo_secret,
);
println!("root_id: {:?}", object.id());
println!("root_key: {:?}", object.key().unwrap());
println!("nodes.len: {:?}", object.blocks().len());
//println!("root: {:?}", tree.root());
//println!("nodes: {:?}", object.blocks);
log_debug!("root_id: {:?}", object.id());
log_debug!("root_key: {:?}", object.key().unwrap());
log_debug!("nodes.len: {:?}", object.blocks().len());
//log_debug!("root: {:?}", tree.root());
//log_debug!("nodes: {:?}", object.blocks);
assert_eq!(object.blocks.len(), 1);
}
#[test]
pub fn test_block_size() {
let max_block_size = store_max_value_size();
println!("max_object_size: {}", max_block_size);
log_debug!("max_object_size: {}", max_block_size);
let id = Digest::Blake3Digest32([0u8; 32]);
let key = SymKey::ChaCha20Key([0u8; 32]);
@ -880,35 +879,35 @@ mod test {
);
let root_two_ser = serde_bare::to_vec(&root_two).unwrap();
println!(
log_debug!(
"range of valid value sizes {} {}",
store_valid_value_size(0),
store_max_value_size()
);
println!(
log_debug!(
"max_data_payload_of_object: {}",
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA
);
println!(
log_debug!(
"max_data_payload_depth_1: {}",
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA - MAX_DEPS_SIZE
);
println!(
log_debug!(
"max_data_payload_depth_2: {}",
MAX_ARITY_ROOT * MAX_DATA_PAYLOAD_SIZE
);
println!(
log_debug!(
"max_data_payload_depth_3: {}",
MAX_ARITY_ROOT * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE
);
let max_arity_leaves = (max_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
println!("max_arity_leaves: {}", max_arity_leaves);
log_debug!("max_arity_leaves: {}", max_arity_leaves);
assert_eq!(max_arity_leaves, MAX_ARITY_LEAVES);
assert_eq!(
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA,
@ -917,15 +916,15 @@ mod test {
let max_arity_root =
(max_block_size - EMPTY_BLOCK_SIZE - MAX_DEPS_SIZE - BIG_VARINT_EXTRA * 2)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
println!("max_arity_root: {}", max_arity_root);
log_debug!("max_arity_root: {}", max_arity_root);
assert_eq!(max_arity_root, MAX_ARITY_ROOT);
println!("store_max_value_size: {}", leaf_full_data_ser.len());
log_debug!("store_max_value_size: {}", leaf_full_data_ser.len());
assert_eq!(leaf_full_data_ser.len(), max_block_size);
println!("leaf_empty: {}", leaf_empty_ser.len());
log_debug!("leaf_empty: {}", leaf_empty_ser.len());
assert_eq!(leaf_empty_ser.len(), EMPTY_BLOCK_SIZE);
println!("root_depsref: {}", root_depsref_ser.len());
log_debug!("root_depsref: {}", root_depsref_ser.len());
assert_eq!(root_depsref_ser.len(), EMPTY_ROOT_SIZE_DEPSREF);
println!("internal_max: {}", internal_max_ser.len());
log_debug!("internal_max: {}", internal_max_ser.len());
assert_eq!(
internal_max_ser.len(),
EMPTY_BLOCK_SIZE
@ -933,22 +932,22 @@ mod test {
+ MAX_ARITY_LEAVES * (BLOCK_ID_SIZE + BLOCK_KEY_SIZE)
);
assert!(internal_max_ser.len() < max_block_size);
println!("internal_one: {}", internal_one_ser.len());
log_debug!("internal_one: {}", internal_one_ser.len());
assert_eq!(
internal_one_ser.len(),
EMPTY_BLOCK_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE
);
println!("internal_two: {}", internal_two_ser.len());
log_debug!("internal_two: {}", internal_two_ser.len());
assert_eq!(
internal_two_ser.len(),
EMPTY_BLOCK_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE
);
println!("root_one: {}", root_one_ser.len());
log_debug!("root_one: {}", root_one_ser.len());
assert_eq!(
root_one_ser.len(),
EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE
);
println!("root_two: {}", root_two_ser.len());
log_debug!("root_two: {}", root_two_ser.len());
assert_eq!(
root_two_ser.len(),
EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE
@ -961,9 +960,9 @@ mod test {
// let arity_512: usize =
// (object_size_512 - 8 * OBJECT_ID_SIZE) / (OBJECT_ID_SIZE + OBJECT_KEY_SIZE);
// println!("1-page object_size: {}", object_size_1);
// println!("512-page object_size: {}", object_size_512);
// println!("max arity of 1-page object: {}", arity_1);
// println!("max arity of 512-page object: {}", arity_512);
// log_debug!("1-page object_size: {}", object_size_1);
// log_debug!("512-page object_size: {}", object_size_512);
// log_debug!("max arity of 1-page object: {}", arity_1);
// log_debug!("max arity of 512-page object: {}", arity_512);
}
}

@ -27,12 +27,12 @@ pub fn generate_null_keypair() -> (PrivKey, PubKey) {
secret: sk,
};
// println!(
// log_debug!(
// "private key: ({}) {:?}",
// keypair.secret.as_bytes().len(),
// keypair.secret.as_bytes()
// );
// println!(
// log_debug!(
// "public key: ({}) {:?}",
// keypair.public.as_bytes().len(),
// keypair.public.as_bytes()
@ -45,12 +45,12 @@ pub fn generate_null_keypair() -> (PrivKey, PubKey) {
}
pub fn keypair_from_ed(secret: SecretKey, public: PublicKey) -> (PrivKey, PubKey) {
// println!(
// log_debug!(
// "private key: ({}) {:?}",
// keypair.secret.as_bytes().len(),
// keypair.secret.as_bytes()
// );
// println!(
// log_debug!(
// "public key: ({}) {:?}",
// keypair.public.as_bytes().len(),
// keypair.public.as_bytes()
@ -94,12 +94,12 @@ pub fn verify(content: &Vec<u8>, sig: Sig, pub_key: PubKey) -> Result<(), NgErro
pub fn generate_keypair() -> (PrivKey, PubKey) {
let mut csprng = OsRng {};
let keypair: Keypair = Keypair::generate(&mut csprng);
// println!(
// log_debug!(
// "private key: ({}) {:?}",
// keypair.secret.as_bytes().len(),
// keypair.secret.as_bytes()
// );
// println!(
// log_debug!(
// "public key: ({}) {:?}",
// keypair.public.as_bytes().len(),
// keypair.public.as_bytes()

@ -9,7 +9,6 @@ repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
p2p-repo = { path = "../p2p-repo" }
debug_print = "1.0.0"
serde = { version = "1.0.142", features = ["derive"] }
serde_bare = "0.5.0"
tempfile = "3"

@ -12,7 +12,7 @@ use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use debug_print::*;
use p2p_repo::log::*;
use std::path::Path;
use std::path::PathBuf;
use std::sync::RwLockReadGuard;
@ -383,7 +383,7 @@ impl LmdbKCVStore {
.unwrap();
let env = shared_rkv.read().unwrap();
println!("created env with LMDB Version: {}", env.version());
log_info!("created env with LMDB Version: {}", env.version());
let main_store = env.open_multi("main", StoreOptions::create()).unwrap();

@ -11,7 +11,7 @@ use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use debug_print::*;
use p2p_repo::log::*;
use std::path::Path;
use std::sync::{Arc, RwLock};
@ -99,7 +99,7 @@ impl RepoStore for LmdbRepoStore {
Err(_e) => Err(StorageError::InvalidValue),
Ok(o) => {
if o.id() != *block_id {
debug_println!(
log_debug!(
"Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}",
block_id,
o.id(),
@ -201,7 +201,6 @@ impl RepoStore for LmdbRepoStore {
writer.commit().unwrap();
Ok((block, slice.len()))
}
}
impl LmdbRepoStore {
@ -217,7 +216,7 @@ impl LmdbRepoStore {
.unwrap();
let env = shared_rkv.read().unwrap();
println!(
log_debug!(
"created env with LMDB Version: {} key: {}",
env.version(),
hex::encode(&key)
@ -352,7 +351,7 @@ impl LmdbRepoStore {
if !meta.pin {
// we add an entry to recently_used_store with now
println!("adding to LRU");
log_debug!("adding to LRU");
self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap();
}
}
@ -362,7 +361,7 @@ impl LmdbRepoStore {
synced: true,
last_used: now,
};
println!("adding to LRU also");
log_debug!("adding to LRU also");
self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap();
}
}
@ -396,7 +395,7 @@ impl LmdbRepoStore {
while let Some(Ok(mut sub_iter)) = iter.next() {
while let Some(Ok(k)) = sub_iter.next() {
//println!("removing {:?} {:?}", k.0, k.1);
//log_debug!("removing {:?} {:?}", k.0, k.1);
let block_id = serde_bare::from_slice::<ObjectId>(k.1).unwrap();
block_ids.push(block_id);
}
@ -430,7 +429,7 @@ impl LmdbRepoStore {
}
for block_id in block_ids {
let (block, block_size) = self.del(&block_id).unwrap();
println!("removed {:?}", block_id);
log_debug!("removed {:?}", block_id);
total += block_size;
if total >= size {
break;
@ -468,25 +467,25 @@ impl LmdbRepoStore {
fn list_all(&self) {
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
println!("MAIN");
log_debug!("MAIN");
let mut iter = self.main_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter.next() {
println!("{:?} {:?}", entry.0, entry.1)
log_debug!("{:?} {:?}", entry.0, entry.1)
}
println!("META");
log_debug!("META");
let mut iter2 = self.meta_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter2.next() {
println!("{:?} {:?}", entry.0, entry.1)
log_debug!("{:?} {:?}", entry.0, entry.1)
}
println!("EXPIRY");
log_debug!("EXPIRY");
let mut iter3 = self.expiry_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter3.next() {
println!("{:?} {:?}", entry.0, entry.1)
log_debug!("{:?} {:?}", entry.0, entry.1)
}
println!("LRU");
log_debug!("LRU");
let mut iter4 = self.recently_used_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter4.next() {
println!("{:?} {:?}", entry.0, entry.1)
log_debug!("{:?} {:?}", entry.0, entry.1)
}
}
}
@ -494,6 +493,7 @@ impl LmdbRepoStore {
mod test {
use crate::repo_store::LmdbRepoStore;
use p2p_repo::log::*;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
@ -511,7 +511,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let mut now = now_timestamp();
now -= 200;
@ -525,14 +525,14 @@ mod test {
None,
);
let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", x, block_id);
log_debug!("#{} -> objId {:?}", x, block_id);
store
.has_been_synced(&block_id, Some(now + x as u32))
.unwrap();
}
let ret = store.remove_least_used(200);
println!("removed {}", ret);
log_debug!("removed {}", ret);
assert_eq!(ret, 208)
//store.list_all();
@ -544,7 +544,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let mut now = now_timestamp();
now -= 200;
@ -558,7 +558,7 @@ mod test {
None,
);
let obj_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", x, obj_id);
log_debug!("#{} -> objId {:?}", x, obj_id);
store.set_pin(&obj_id, true).unwrap();
store
.has_been_synced(&obj_id, Some(now + x as u32))
@ -566,7 +566,7 @@ mod test {
}
let ret = store.remove_least_used(200);
println!("removed {}", ret);
log_debug!("removed {}", ret);
assert_eq!(ret, 0);
store.list_all();
@ -601,7 +601,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let now = now_timestamp();
@ -619,7 +619,7 @@ mod test {
now + 10,
];
let mut block_ids: Vec<ObjectId> = Vec::with_capacity(11);
println!("now {}", now);
log_debug!("now {}", now);
let mut i = 0u8;
for expiry in list {
@ -632,7 +632,7 @@ mod test {
None,
);
let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", i, block_id);
log_debug!("#{} -> objId {:?}", i, block_id);
block_ids.push(block_id);
i += 1;
}
@ -655,7 +655,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let now = now_timestamp();
@ -668,7 +668,7 @@ mod test {
now - 2, //#5 should be removed, and above
];
let mut block_ids: Vec<ObjectId> = Vec::with_capacity(6);
println!("now {}", now);
log_debug!("now {}", now);
let mut i = 0u8;
for expiry in list {
@ -681,7 +681,7 @@ mod test {
None,
);
let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", i, block_id);
log_debug!("#{} -> objId {:?}", i, block_id);
block_ids.push(block_id);
i += 1;
}
@ -702,7 +702,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let store = LmdbRepoStore::open(root.path(), key);
store.remove_expired().unwrap();
}
@ -715,7 +715,7 @@ mod test {
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
@ -730,7 +730,7 @@ mod test {
let block_id = store.put(&block).unwrap();
assert_eq!(block_id, block.id());
println!("ObjectId: {:?}", block_id);
log_debug!("ObjectId: {:?}", block_id);
assert_eq!(
block_id,
Digest::Blake3Digest32([
@ -741,7 +741,7 @@ mod test {
let block_res = store.get(&block_id).unwrap();
println!("Block: {:?}", block_res);
log_debug!("Block: {:?}", block_res);
assert_eq!(block_res.id(), block.id());
}
@ -755,7 +755,7 @@ mod test {
{
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
log_debug!("{}", root.path().to_str().unwrap());
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
let shared_rkv = manager
@ -766,7 +766,7 @@ mod test {
.unwrap();
let env = shared_rkv.read().unwrap();
println!("LMDB Version: {}", env.version());
log_debug!("LMDB Version: {}", env.version());
let store = env.open_single("testdb", StoreOptions::create()).unwrap();
@ -823,12 +823,12 @@ mod test {
let reader = env.read().expect("reader");
let stat = store.stat(&reader).unwrap();
println!("LMDB stat page_size : {}", stat.page_size());
println!("LMDB stat depth : {}", stat.depth());
println!("LMDB stat branch_pages : {}", stat.branch_pages());
println!("LMDB stat leaf_pages : {}", stat.leaf_pages());
println!("LMDB stat overflow_pages : {}", stat.overflow_pages());
println!("LMDB stat entries : {}", stat.entries());
log_debug!("LMDB stat page_size : {}", stat.page_size());
log_debug!("LMDB stat depth : {}", stat.depth());
log_debug!("LMDB stat branch_pages : {}", stat.branch_pages());
log_debug!("LMDB stat leaf_pages : {}", stat.leaf_pages());
log_debug!("LMDB stat overflow_pages : {}", stat.overflow_pages());
log_debug!("LMDB stat entries : {}", stat.entries());
}
// {
@ -838,17 +838,17 @@ mod test {
// let reader = env.read().expect("reader");
// // Keys are `AsRef<u8>`, and the return value is `Result<Option<Value>, StoreError>`.
// // println!("Get int {:?}", store.get(&reader, "int").unwrap());
// // println!("Get uint {:?}", store.get(&reader, "uint").unwrap());
// // println!("Get float {:?}", store.get(&reader, "float").unwrap());
// // println!("Get instant {:?}", store.get(&reader, "instant").unwrap());
// // println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
// // println!("Get string {:?}", store.get(&reader, "string").unwrap());
// // println!("Get json {:?}", store.get(&reader, "json").unwrap());
// println!("Get blob {:?}", store.get(&reader, "blob").unwrap());
// // log_debug!("Get int {:?}", store.get(&reader, "int").unwrap());
// // log_debug!("Get uint {:?}", store.get(&reader, "uint").unwrap());
// // log_debug!("Get float {:?}", store.get(&reader, "float").unwrap());
// // log_debug!("Get instant {:?}", store.get(&reader, "instant").unwrap());
// // log_debug!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
// // log_debug!("Get string {:?}", store.get(&reader, "string").unwrap());
// // log_debug!("Get json {:?}", store.get(&reader, "json").unwrap());
// log_debug!("Get blob {:?}", store.get(&reader, "blob").unwrap());
// // Retrieving a non-existent value returns `Ok(None)`.
// println!(
// log_debug!(
// "Get non-existent value {:?}",
// store.get(&reader, "non-existent").unwrap()
// );
@ -864,7 +864,7 @@ mod test {
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// writer.abort();
// let reader = env.read().expect("reader");
// println!(
// log_debug!(
// "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap()
// );
@ -879,7 +879,7 @@ mod test {
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// }
// let reader = env.read().expect("reader");
// println!(
// log_debug!(
// "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap()
// );
@ -898,26 +898,26 @@ mod test {
// // In the code above, "foo" and "bar" were put into the store, then "foo" was
// // deleted so only "bar" will return a result when the database is queried via the
// // writer.
// println!(
// log_debug!(
// "It should be None! ({:?})",
// store.get(&writer, "foo").unwrap()
// );
// println!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
// log_debug!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
// // But a reader won't see that change until the write transaction is committed.
// {
// let reader = env.read().expect("reader");
// println!("Get foo {:?}", store.get(&reader, "foo").unwrap());
// println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// log_debug!("Get foo {:?}", store.get(&reader, "foo").unwrap());
// log_debug!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// }
// writer.commit().unwrap();
// {
// let reader = env.read().expect("reader");
// println!(
// log_debug!(
// "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap()
// );
// println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// log_debug!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// }
// // Committing a transaction consumes the writer, preventing you from reusing it by
@ -943,11 +943,11 @@ mod test {
// // {
// // let reader = env.read().expect("reader");
// // println!(
// // log_debug!(
// // "It should be None! ({:?})",
// // store.get(&reader, "foo").unwrap()
// // );
// // println!(
// // log_debug!(
// // "It should be None! ({:?})",
// // store.get(&reader, "bar").unwrap()
// // );
@ -956,17 +956,17 @@ mod test {
let stat = env.stat().unwrap();
let info = env.info().unwrap();
println!("LMDB info map_size : {}", info.map_size());
println!("LMDB info last_pgno : {}", info.last_pgno());
println!("LMDB info last_txnid : {}", info.last_txnid());
println!("LMDB info max_readers : {}", info.max_readers());
println!("LMDB info num_readers : {}", info.num_readers());
println!("LMDB stat page_size : {}", stat.page_size());
println!("LMDB stat depth : {}", stat.depth());
println!("LMDB stat branch_pages : {}", stat.branch_pages());
println!("LMDB stat leaf_pages : {}", stat.leaf_pages());
println!("LMDB stat overflow_pages : {}", stat.overflow_pages());
println!("LMDB stat entries : {}", stat.entries());
log_debug!("LMDB info map_size : {}", info.map_size());
log_debug!("LMDB info last_pgno : {}", info.last_pgno());
log_debug!("LMDB info last_txnid : {}", info.last_txnid());
log_debug!("LMDB info max_readers : {}", info.max_readers());
log_debug!("LMDB info num_readers : {}", info.num_readers());
log_debug!("LMDB stat page_size : {}", stat.page_size());
log_debug!("LMDB stat depth : {}", stat.depth());
log_debug!("LMDB stat branch_pages : {}", stat.branch_pages());
log_debug!("LMDB stat leaf_pages : {}", stat.leaf_pages());
log_debug!("LMDB stat overflow_pages : {}", stat.overflow_pages());
log_debug!("LMDB stat entries : {}", stat.entries());
}
// We reopen the env and data to see if it was well saved to disk.
{
@ -979,13 +979,13 @@ mod test {
.unwrap();
let env = shared_rkv.read().unwrap();
println!("LMDB Version: {}", env.version());
log_debug!("LMDB Version: {}", env.version());
let mut store = env.open_single("testdb", StoreOptions::default()).unwrap(); //StoreOptions::create()
{
let reader = env.read().expect("reader");
println!(
log_debug!(
"It should be baz! ({:?})",
store.get(&reader, "bar").unwrap()
);

Loading…
Cancel
Save