refactor log macros, ngd master key taken from args or file

Niko 2 years ago
parent a39490a30c
commit 7dc47ffeb0
  1. 1
      .gitignore
  2. 236
      Cargo.lock
  3. 1
      ng-app/src-tauri/Cargo.toml
  4. 20
      ng-app/src-tauri/src/lib.rs
  5. 1
      ng-sdk-js/Cargo.toml
  6. 19
      ng-sdk-js/src/lib.rs
  7. 1
      ng-wallet/Cargo.toml
  8. 66
      ng-wallet/src/lib.rs
  9. 24
      ng-wallet/src/types.rs
  10. 1
      ngcli/Cargo.toml
  11. 110
      ngcli/src/main.rs
  12. 11
      ngd/Cargo.toml
  13. 34
      ngd/src/cli.rs
  14. 256
      ngd/src/main.rs
  15. 27
      ngd/src/types.rs
  16. 3
      ngone/Cargo.toml
  17. 16
      ngone/src/main.rs
  18. 10
      p2p-broker/Cargo.toml
  19. 4
      p2p-broker/src/lib.rs
  20. 30
      p2p-broker/src/server.rs
  21. 26
      p2p-broker/src/server_ws.rs
  22. 92
      p2p-broker/src/types.rs
  23. 31
      p2p-broker/src/utils.rs
  24. 1
      p2p-client-ws/Cargo.toml
  25. 6
      p2p-client-ws/src/lib.rs
  26. 37
      p2p-client-ws/src/remote_ws.rs
  27. 22
      p2p-client-ws/src/remote_ws_wasm.rs
  28. 4
      p2p-net/Cargo.toml
  29. 2
      p2p-net/src/actor.rs
  30. 26
      p2p-net/src/broker.rs
  31. 44
      p2p-net/src/broker_connection.rs
  32. 35
      p2p-net/src/connection.rs
  33. 50
      p2p-net/src/lib.rs
  34. 31
      p2p-net/src/types.rs
  35. 17
      p2p-net/src/utils.rs
  36. 15
      p2p-repo/Cargo.toml
  37. 94
      p2p-repo/src/branch.rs
  38. 22
      p2p-repo/src/commit.rs
  39. 159
      p2p-repo/src/lib.rs
  40. 125
      p2p-repo/src/object.rs
  41. 12
      p2p-repo/src/utils.rs
  42. 1
      stores-lmdb/Cargo.toml
  43. 4
      stores-lmdb/src/kcv_store.rs
  44. 148
      stores-lmdb/src/repo_store.rs

1
.gitignore vendored

@@ -1,4 +1,5 @@
*~ *~
.ng
.direnv .direnv
!.github !.github
\#* \#*

236
Cargo.lock generated

@@ -92,12 +92,55 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "anstream"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is-terminal",
"utf8parse",
]
[[package]] [[package]]
name = "anstyle" name = "anstyle"
version = "1.0.0" version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d" checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d"
[[package]]
name = "anstyle-parse"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
"windows-sys 0.48.0",
]
[[package]]
name = "anstyle-wincon"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "180abfa45703aebe0093f79badacc01b8fd4ea2e35118747e5811127f926e188"
dependencies = [
"anstyle",
"windows-sys 0.48.0",
]
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.71" version = "1.0.71"
@@ -676,6 +719,48 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "clap"
version = "4.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80672091db20273a15cf9fdd4e47ed43b5091ec9841bf4c6145c9dfbbcae09ed"
dependencies = [
"clap_builder",
"clap_derive",
"once_cell",
]
[[package]]
name = "clap_builder"
version = "4.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1458a1df40e1e2afebb7ab60ce55c1fa8f431146205aa5f4887e0b111c27636"
dependencies = [
"anstream",
"anstyle",
"bitflags",
"clap_lex",
"strsim",
]
[[package]]
name = "clap_derive"
version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.16",
]
[[package]]
name = "clap_lex"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"
[[package]] [[package]]
name = "cocoa" name = "cocoa"
version = "0.24.1" version = "0.24.1"
@@ -713,6 +798,12 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]] [[package]]
name = "combine" name = "combine"
version = "4.6.6" version = "4.6.6"
@@ -1010,6 +1101,23 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f215f9b7224f49fb73256115331f677d868b34d18b65dbe4db392e6021eea90" checksum = "8f215f9b7224f49fb73256115331f677d868b34d18b65dbe4db392e6021eea90"
[[package]]
name = "default-net"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b06254cc7a9b82a9e3e78849eab1af3dbef006bf629b2928b882534d84ded6b9"
dependencies = [
"dlopen2",
"libc",
"memalloc",
"netlink-packet-core",
"netlink-packet-route",
"netlink-sys",
"once_cell",
"system-configuration",
"windows 0.48.0",
]
[[package]] [[package]]
name = "derive_more" name = "derive_more"
version = "0.99.17" version = "0.99.17"
@@ -1076,6 +1184,29 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b" checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
[[package]]
name = "dlopen2"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b121caccfc363e4d9a4589528f3bef7c71b83c6ed01c8dc68cbeeb7fd29ec698"
dependencies = [
"dlopen2_derive",
"libc",
"once_cell",
"winapi",
]
[[package]]
name = "dlopen2_derive"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a09ac8bb8c16a282264c379dffba707b9c998afc7506009137f3c6136888078"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]] [[package]]
name = "doc-comment" name = "doc-comment"
version = "0.3.3" version = "0.3.3"
@@ -2359,6 +2490,12 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5"
[[package]]
name = "memalloc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df39d232f5c40b0891c10216992c2f250c054105cb1e56f0fc9032db6203ecc1"
[[package]] [[package]]
name = "memchr" name = "memchr"
version = "2.5.0" version = "2.5.0"
@@ -2476,6 +2613,55 @@ dependencies = [
"jni-sys", "jni-sys",
] ]
[[package]]
name = "netlink-packet-core"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e5cf0b54effda4b91615c40ff0fd12d0d4c9a6e0f5116874f03941792ff535a"
dependencies = [
"anyhow",
"byteorder",
"libc",
"netlink-packet-utils",
]
[[package]]
name = "netlink-packet-route"
version = "0.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea993e32c77d87f01236c38f572ecb6c311d592e56a06262a007fd2a6e31253c"
dependencies = [
"anyhow",
"bitflags",
"byteorder",
"libc",
"netlink-packet-core",
"netlink-packet-utils",
]
[[package]]
name = "netlink-packet-utils"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34"
dependencies = [
"anyhow",
"byteorder",
"paste",
"thiserror",
]
[[package]]
name = "netlink-sys"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411"
dependencies = [
"bytes",
"libc",
"log",
]
[[package]] [[package]]
name = "new_debug_unreachable" name = "new_debug_unreachable"
version = "1.0.4" version = "1.0.4"
@@ -2487,7 +2673,6 @@ name = "ng-app"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"async-std", "async-std",
"debug_print",
"ng-wallet", "ng-wallet",
"p2p-net", "p2p-net",
"p2p-repo", "p2p-repo",
@@ -2502,7 +2687,6 @@ name = "ng-sdk-js"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"async-std", "async-std",
"debug_print",
"futures", "futures",
"getrandom 0.1.16", "getrandom 0.1.16",
"gloo-timers", "gloo-timers",
@@ -2532,7 +2716,6 @@ dependencies = [
"async-std", "async-std",
"base64-url", "base64-url",
"chacha20poly1305", "chacha20poly1305",
"debug_print",
"getrandom 0.1.16", "getrandom 0.1.16",
"image", "image",
"lazy_static", "lazy_static",
@@ -2553,7 +2736,6 @@ version = "0.1.0"
dependencies = [ dependencies = [
"assert_cmd", "assert_cmd",
"async-std", "async-std",
"debug_print",
"ed25519-dalek", "ed25519-dalek",
"fastbloom-rs", "fastbloom-rs",
"futures", "futures",
@@ -2571,9 +2753,18 @@ name = "ngd"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"async-std", "async-std",
"base64-url",
"clap",
"default-net",
"env_logger",
"log",
"p2p-broker", "p2p-broker",
"p2p-net", "p2p-net",
"p2p-repo", "p2p-repo",
"serde",
"serde_bare",
"serde_bytes",
"slice_as_array",
] ]
[[package]] [[package]]
@@ -2582,7 +2773,6 @@ version = "0.1.0"
dependencies = [ dependencies = [
"base64-url", "base64-url",
"bytes", "bytes",
"debug_print",
"env_logger", "env_logger",
"log", "log",
"ng-wallet", "ng-wallet",
@@ -2765,8 +2955,8 @@ dependencies = [
"async-std", "async-std",
"async-trait", "async-trait",
"async-tungstenite", "async-tungstenite",
"blake3",
"chacha20", "chacha20",
"debug_print",
"futures", "futures",
"getrandom 0.2.9", "getrandom 0.2.9",
"hex", "hex",
@@ -2791,7 +2981,6 @@ dependencies = [
"async-trait", "async-trait",
"async-tungstenite", "async-tungstenite",
"chacha20", "chacha20",
"debug_print",
"futures", "futures",
"getrandom 0.2.9", "getrandom 0.2.9",
"p2p-net", "p2p-net",
@@ -2814,11 +3003,9 @@ dependencies = [
"async-std", "async-std",
"async-trait", "async-trait",
"blake3", "blake3",
"debug_print",
"ed25519-dalek", "ed25519-dalek",
"futures", "futures",
"getrandom 0.2.9", "getrandom 0.2.9",
"gloo-timers",
"noise-protocol", "noise-protocol",
"noise-rust-crypto", "noise-rust-crypto",
"num_enum", "num_enum",
@@ -2842,11 +3029,14 @@ dependencies = [
"ed25519-dalek", "ed25519-dalek",
"fastbloom-rs", "fastbloom-rs",
"futures", "futures",
"gloo-timers",
"hex", "hex",
"log",
"rand 0.7.3", "rand 0.7.3",
"serde", "serde",
"serde_bare", "serde_bare",
"serde_bytes", "serde_bytes",
"wasm-bindgen",
"web-time", "web-time",
] ]
@@ -4021,7 +4211,6 @@ dependencies = [
name = "stores-lmdb" name = "stores-lmdb"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"debug_print",
"hex", "hex",
"p2p-repo", "p2p-repo",
"rkv", "rkv",
@@ -4101,6 +4290,27 @@ dependencies = [
"unicode-ident", "unicode-ident",
] ]
[[package]]
name = "system-configuration"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
dependencies = [
"bitflags",
"core-foundation",
"system-configuration-sys",
]
[[package]]
name = "system-configuration-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]] [[package]]
name = "system-deps" name = "system-deps"
version = "6.1.0" version = "6.1.0"
@@ -4797,6 +5007,12 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
[[package]]
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]] [[package]]
name = "uuid" name = "uuid"
version = "0.8.2" version = "0.8.2"

@@ -17,7 +17,6 @@ crate-type = ["staticlib", "cdylib", "rlib"]
tauri-build = { version = "2.0.0-alpha.5", features = [] } tauri-build = { version = "2.0.0-alpha.5", features = [] }
[dependencies] [dependencies]
debug_print = "1.0.0"
tauri = { version = "2.0.0-alpha.9", features = [] } tauri = { version = "2.0.0-alpha.9", features = [] }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"

@@ -10,8 +10,8 @@ use async_std::stream::StreamExt;
use ng_wallet::types::*; use ng_wallet::types::*;
use ng_wallet::*; use ng_wallet::*;
use p2p_net::broker::*; use p2p_net::broker::*;
use p2p_net::log;
use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend}; use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend};
use p2p_repo::log::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use tauri::{App, Manager}; use tauri::{App, Manager};
@@ -30,13 +30,13 @@ pub type SetupHook = Box<dyn FnOnce(&mut App) -> Result<(), Box<dyn std::error::
#[tauri::command(rename_all = "snake_case")] #[tauri::command(rename_all = "snake_case")]
async fn test() -> Result<(), ()> { async fn test() -> Result<(), ()> {
log!("test is {}", BROKER.read().await.test()); log_info!("test is {}", BROKER.read().await.test());
Ok(()) Ok(())
} }
#[tauri::command(rename_all = "snake_case")] #[tauri::command(rename_all = "snake_case")]
async fn wallet_gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> Result<ShuffledPazzle, ()> { async fn wallet_gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> Result<ShuffledPazzle, ()> {
log!( log_info!(
"wallet_gen_shuffle_for_pazzle_opening from rust {}", "wallet_gen_shuffle_for_pazzle_opening from rust {}",
pazzle_length pazzle_length
); );
@@ -45,7 +45,7 @@ async fn wallet_gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> Result<Shuf
#[tauri::command(rename_all = "snake_case")] #[tauri::command(rename_all = "snake_case")]
async fn wallet_gen_shuffle_for_pin() -> Result<Vec<u8>, ()> { async fn wallet_gen_shuffle_for_pin() -> Result<Vec<u8>, ()> {
log!("wallet_gen_shuffle_for_pin from rust"); log_info!("wallet_gen_shuffle_for_pin from rust");
Ok(gen_shuffle_for_pin()) Ok(gen_shuffle_for_pin())
} }
@@ -55,13 +55,13 @@ async fn wallet_open_wallet_with_pazzle(
pazzle: Vec<u8>, pazzle: Vec<u8>,
pin: [u8; 4], pin: [u8; 4],
) -> Result<EncryptedWallet, String> { ) -> Result<EncryptedWallet, String> {
log!("wallet_open_wallet_with_pazzle from rust {:?}", pazzle); log_info!("wallet_open_wallet_with_pazzle from rust {:?}", pazzle);
open_wallet_with_pazzle(wallet, pazzle, pin).map_err(|e| e.to_string()) open_wallet_with_pazzle(wallet, pazzle, pin).map_err(|e| e.to_string())
} }
#[tauri::command(rename_all = "snake_case")] #[tauri::command(rename_all = "snake_case")]
async fn wallet_create_wallet(mut params: CreateWalletV0) -> Result<CreateWalletResultV0, String> { async fn wallet_create_wallet(mut params: CreateWalletV0) -> Result<CreateWalletResultV0, String> {
//log!("wallet_create_wallet from rust {:?}", params); //log_info!("wallet_create_wallet from rust {:?}", params);
params.result_with_wallet_file = false; params.result_with_wallet_file = false;
let local_save = params.local_save; let local_save = params.local_save;
let res = create_wallet_v0(params).await.map_err(|e| e.to_string()); let res = create_wallet_v0(params).await.map_err(|e| e.to_string());
@@ -77,7 +77,7 @@ async fn wallet_create_wallet(mut params: CreateWalletV0) -> Result<CreateWallet
#[tauri::command(rename_all = "snake_case")] #[tauri::command(rename_all = "snake_case")]
async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) -> Result<(), ()> { async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) -> Result<(), ()> {
log!("doc_sync_branch {} {}", nuri, stream_id); log_info!("doc_sync_branch {} {}", nuri, stream_id);
let mut reader; let mut reader;
{ {
@@ -99,7 +99,7 @@ async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) ->
BROKER.write().await.tauri_stream_cancel(stream_id); BROKER.write().await.tauri_stream_cancel(stream_id);
log!("END OF LOOP"); log_info!("END OF LOOP");
Ok(()) Ok(())
} }
@@ -110,7 +110,7 @@ async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) ->
#[tauri::command(rename_all = "snake_case")] #[tauri::command(rename_all = "snake_case")]
async fn cancel_doc_sync_branch(stream_id: &str) -> Result<(), ()> { async fn cancel_doc_sync_branch(stream_id: &str) -> Result<(), ()> {
log!("cancel stream {}", stream_id); log_info!("cancel stream {}", stream_id);
BROKER BROKER
.write() .write()
.await .await
@@ -123,7 +123,7 @@ async fn doc_get_file_from_store_with_object_ref(
nuri: &str, nuri: &str,
obj_ref: ObjectRef, obj_ref: ObjectRef,
) -> Result<ObjectContent, String> { ) -> Result<ObjectContent, String> {
log!( log_info!(
"doc_get_file_from_store_with_object_ref {} {:?}", "doc_get_file_from_store_with_object_ref {} {:?}",
nuri, nuri,
obj_ref obj_ref

@@ -23,7 +23,6 @@ ng-wallet = { path = "../ng-wallet" }
async-std = { version = "1.12.0", features = ["attributes","unstable"] } async-std = { version = "1.12.0", features = ["attributes","unstable"] }
futures = "0.3.24" futures = "0.3.24"
pharos = "0.5" pharos = "0.5"
debug_print = "1.0.0"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0" serde_bare = "0.5.0"
serde_bytes = "0.11.7" serde_bytes = "0.11.7"

@@ -23,7 +23,7 @@ use p2p_net::broker::*;
use p2p_net::connection::{ClientConfig, StartConfig}; use p2p_net::connection::{ClientConfig, StartConfig};
use p2p_net::types::{DirectPeerId, IP}; use p2p_net::types::{DirectPeerId, IP};
use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend, Sender}; use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend, Sender};
use p2p_net::{log, sleep}; use p2p_repo::log::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::generate_keypair; use p2p_repo::utils::generate_keypair;
use serde_json::json; use serde_json::json;
@@ -31,6 +31,7 @@ use std::net::IpAddr;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use wasm_bindgen::prelude::*; use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_futures::{future_to_promise, JsFuture}; use wasm_bindgen_futures::{future_to_promise, JsFuture};
#[cfg(target_arch = "wasm32")] #[cfg(target_arch = "wasm32")]
@@ -112,7 +113,7 @@ extern "C" {
#[cfg(target_arch = "wasm32")] #[cfg(target_arch = "wasm32")]
#[wasm_bindgen] #[wasm_bindgen]
pub async fn test() { pub async fn test() {
log!("test is {}", BROKER.read().await.test()); log_info!("test is {}", BROKER.read().await.test());
} }
#[cfg(target_arch = "wasm32")] #[cfg(target_arch = "wasm32")]
@@ -123,7 +124,7 @@ pub async fn doc_get_file_from_store_with_object_ref(
) -> Result<JsValue, JsValue> { ) -> Result<JsValue, JsValue> {
let obj_ref = serde_wasm_bindgen::from_value::<ObjectRef>(obj_ref_js).unwrap(); let obj_ref = serde_wasm_bindgen::from_value::<ObjectRef>(obj_ref_js).unwrap();
log!( log_info!(
"doc_get_file {} {:?} {}", "doc_get_file {} {:?} {}",
nuri, nuri,
obj_ref.id, obj_ref.id,
@@ -181,14 +182,14 @@ pub async fn doc_sync_branch(anuri: String, callback: &js_sys::Function) -> JsVa
Err(_) => {} Err(_) => {}
} }
} }
log!("END OF LOOP"); log_info!("END OF LOOP");
Ok(()) Ok(())
} }
spawn_and_log_error(inner_task(reader, anuri, callback.clone())); spawn_and_log_error(inner_task(reader, anuri, callback.clone()));
let cb = Closure::once(move || { let cb = Closure::once(move || {
log!("close channel"); log_info!("close channel");
sender.close_channel() sender.close_channel()
}); });
//Closure::wrap(Box::new(move |sender| sender.close_channel()) as Box<FnMut(Sender<Commit>)>); //Closure::wrap(Box::new(move |sender| sender.close_channel()) as Box<FnMut(Sender<Commit>)>);
@@ -200,7 +201,7 @@ pub async fn doc_sync_branch(anuri: String, callback: &js_sys::Function) -> JsVa
#[cfg(target_arch = "wasm32")] #[cfg(target_arch = "wasm32")]
#[wasm_bindgen] #[wasm_bindgen]
pub async fn start() { pub async fn start() {
log!("random {}", random(10)); log_info!("random {}", random(10));
// let mut random_buf = [0u8; 32]; // let mut random_buf = [0u8; 32];
// getrandom::getrandom(&mut random_buf).unwrap(); // getrandom::getrandom(&mut random_buf).unwrap();
@@ -217,7 +218,7 @@ pub async fn start() {
let (client_priv_key, client_pub_key) = generate_keypair(); let (client_priv_key, client_pub_key) = generate_keypair();
let (user_priv_key, user_pub_key) = generate_keypair(); let (user_priv_key, user_pub_key) = generate_keypair();
log!("start connecting"); log_info!("start connecting");
let res = BROKER let res = BROKER
.write() .write()
@@ -236,7 +237,7 @@ pub async fn start() {
}), }),
) )
.await; .await;
log!("broker.connect : {:?}", res); log_info!("broker.connect : {:?}", res);
if res.is_err() { if res.is_err() {
return Ok(()); return Ok(());
//panic!("Cannot connect"); //panic!("Cannot connect");
@@ -248,7 +249,7 @@ pub async fn start() {
async fn timer_close(remote_peer_id: DirectPeerId) -> ResultSend<()> { async fn timer_close(remote_peer_id: DirectPeerId) -> ResultSend<()> {
async move { async move {
sleep!(std::time::Duration::from_secs(3)); sleep!(std::time::Duration::from_secs(3));
log!("timeout"); log_info!("timeout");
BROKER BROKER
.write() .write()
.await .await

@@ -8,7 +8,6 @@ description = "keeps the secret keys of all identities of the user in a safe wal
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs" repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies] [dependencies]
debug_print = "1.0.0"
serde = { version = "1.0.142", features = ["derive"] } serde = { version = "1.0.142", features = ["derive"] }
serde_bare = "0.5.0" serde_bare = "0.5.0"
serde_bytes = "0.11.7" serde_bytes = "0.11.7"

@@ -9,13 +9,10 @@
// #[macro_use] // #[macro_use]
// extern crate slice_as_array; // extern crate slice_as_array;
#[macro_use]
extern crate p2p_net;
#[macro_use] #[macro_use]
extern crate lazy_static; extern crate lazy_static;
use p2p_net::log;
pub mod types; pub mod types;
pub mod bip39; pub mod bip39;
@@ -36,6 +33,7 @@ use chacha20poly1305::XChaCha20Poly1305;
use image::{imageops::FilterType, io::Reader as ImageReader, ImageOutputFormat}; use image::{imageops::FilterType, io::Reader as ImageReader, ImageOutputFormat};
use safe_transmute::transmute_to_bytes; use safe_transmute::transmute_to_bytes;
use p2p_repo::log::*;
use p2p_repo::types::{PubKey, Site, SiteType, Timestamp}; use p2p_repo::types::{PubKey, Site, SiteType, Timestamp};
use p2p_repo::utils::{generate_keypair, now_timestamp, sign, verify}; use p2p_repo::utils::{generate_keypair, now_timestamp, sign, verify};
use rand::prelude::*; use rand::prelude::*;
@@ -61,7 +59,7 @@ pub fn enc_master_key(
.map_err(|e| NgWalletError::EncryptionError)?; .map_err(|e| NgWalletError::EncryptionError)?;
// `buffer` now contains the encrypted master key // `buffer` now contains the encrypted master key
// println!("cipher {:?}", buffer); // log_debug!("cipher {:?}", buffer);
Ok(buffer.into_array::<48>().unwrap()) Ok(buffer.into_array::<48>().unwrap())
} }
@@ -124,7 +122,7 @@ pub fn enc_encrypted_block(
.map_err(|e| NgWalletError::EncryptionError)?; .map_err(|e| NgWalletError::EncryptionError)?;
// `buffer` now contains the message ciphertext // `buffer` now contains the message ciphertext
// println!("encrypted_block ciphertext {:?}", buffer); // log_debug!("encrypted_block ciphertext {:?}", buffer);
Ok(buffer) Ok(buffer)
} }
@@ -151,7 +149,7 @@ pub fn dec_encrypted_block(
.map_err(|e| NgWalletError::DecryptionError)?; .map_err(|e| NgWalletError::DecryptionError)?;
// `ciphertext` now contains the decrypted block // `ciphertext` now contains the decrypted block
//println!("decrypted_block {:?}", ciphertext); //log_debug!("decrypted_block {:?}", ciphertext);
let decrypted_block = let decrypted_block =
from_slice::<EncryptedWalletV0>(&ciphertext).map_err(|e| NgWalletError::DecryptionError)?; from_slice::<EncryptedWalletV0>(&ciphertext).map_err(|e| NgWalletError::DecryptionError)?;
@@ -205,7 +203,7 @@ pub fn open_wallet_with_pazzle(
v0.id, v0.id,
)?; )?;
log!( log_info!(
"opening of wallet with pazzle took: {} ms", "opening of wallet with pazzle took: {} ms",
opening_pazzle.elapsed().as_millis() opening_pazzle.elapsed().as_millis()
); );
@@ -287,16 +285,16 @@ pub fn display_pazzle(pazzle: &Vec<u8>) -> Vec<String> {
pub fn gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> ShuffledPazzle { pub fn gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> ShuffledPazzle {
let mut rng = rand::thread_rng(); let mut rng = rand::thread_rng();
let mut category_indices: Vec<u8> = (0..pazzle_length).collect(); let mut category_indices: Vec<u8> = (0..pazzle_length).collect();
//log!("{:?}", category_indices); //log_info!("{:?}", category_indices);
category_indices.shuffle(&mut rng); category_indices.shuffle(&mut rng);
//log!("{:?}", category_indices); //log_info!("{:?}", category_indices);
let mut emoji_indices: Vec<Vec<u8>> = Vec::with_capacity(pazzle_length.into()); let mut emoji_indices: Vec<Vec<u8>> = Vec::with_capacity(pazzle_length.into());
for _ in 0..pazzle_length { for _ in 0..pazzle_length {
let mut idx: Vec<u8> = (0..15).collect(); let mut idx: Vec<u8> = (0..15).collect();
//log!("{:?}", idx); //log_info!("{:?}", idx);
idx.shuffle(&mut rng); idx.shuffle(&mut rng);
//log!("{:?}", idx); //log_info!("{:?}", idx);
emoji_indices.push(idx) emoji_indices.push(idx)
} }
ShuffledPazzle { ShuffledPazzle {
@@ -308,9 +306,9 @@ pub fn gen_shuffle_for_pazzle_opening(pazzle_length: u8) -> ShuffledPazzle {
pub fn gen_shuffle_for_pin() -> Vec<u8> { pub fn gen_shuffle_for_pin() -> Vec<u8> {
let mut rng = rand::thread_rng(); let mut rng = rand::thread_rng();
let mut digits: Vec<u8> = (0..10).collect(); let mut digits: Vec<u8> = (0..10).collect();
//log!("{:?}", digits); //log_info!("{:?}", digits);
digits.shuffle(&mut rng); digits.shuffle(&mut rng);
//log!("{:?}", digits); //log_info!("{:?}", digits);
digits digits
} }
@@ -324,7 +322,7 @@ pub fn gen_shuffle_for_pin() -> Vec<u8> {
// for i in &mut mnemonic { // for i in &mut mnemonic {
// *i = choices.chars().nth(ran.gen_range(0, 72)).unwrap(); // *i = choices.chars().nth(ran.gen_range(0, 72)).unwrap();
// } // }
// log!("{}", mnemonic.iter().collect::<String>()); // log_info!("{}", mnemonic.iter().collect::<String>());
// } // }
/// creates a Wallet from a pin, a security text and image (with option to send the bootstrap and wallet to nextgraph.one) /// creates a Wallet from a pin, a security text and image (with option to send the bootstrap and wallet to nextgraph.one)
@@ -438,14 +436,14 @@ pub async fn create_wallet_v0(
*i = ran.gen_range(0, 15) + (category_indices[ix] << 4); *i = ran.gen_range(0, 15) + (category_indices[ix] << 4);
} }
//println!("pazzle {:?}", pazzle); //log_debug!("pazzle {:?}", pazzle);
let mut mnemonic = [0u16; 12]; let mut mnemonic = [0u16; 12];
for i in &mut mnemonic { for i in &mut mnemonic {
*i = ran.gen_range(0, 2048); *i = ran.gen_range(0, 2048);
} }
//println!("mnemonic {:?}", display_mnemonic(&mnemonic)); //log_debug!("mnemonic {:?}", display_mnemonic(&mnemonic));
//slice_as_array!(&mnemonic, [String; 12]) //slice_as_array!(&mnemonic, [String; 12])
//.ok_or(NgWalletError::InternalError)? //.ok_or(NgWalletError::InternalError)?
@@ -478,8 +476,8 @@ pub async fn create_wallet_v0(
let mut salt_mnemonic = [0u8; 16]; let mut salt_mnemonic = [0u8; 16];
getrandom::getrandom(&mut salt_mnemonic).map_err(|e| NgWalletError::InternalError)?; getrandom::getrandom(&mut salt_mnemonic).map_err(|e| NgWalletError::InternalError)?;
//println!("salt_pazzle {:?}", salt_pazzle); //log_debug!("salt_pazzle {:?}", salt_pazzle);
//println!("salt_mnemonic {:?}", salt_mnemonic); //log_debug!("salt_mnemonic {:?}", salt_mnemonic);
let mnemonic_key = derive_key_from_pass( let mnemonic_key = derive_key_from_pass(
[transmute_to_bytes(&mnemonic), &params.pin].concat(), [transmute_to_bytes(&mnemonic), &params.pin].concat(),
@@ -541,7 +539,7 @@ pub async fn create_wallet_v0(
// TODO send bootstrap (if) // TODO send bootstrap (if)
// TODO send wallet (if) // TODO send wallet (if)
log!( log_info!(
"creating of wallet took: {} ms", "creating of wallet took: {} ms",
creating_pazzle.elapsed().as_millis() creating_pazzle.elapsed().as_millis()
); );
@@ -577,11 +575,11 @@ mod tests {
#[test] #[test]
fn test_gen_shuffle() { fn test_gen_shuffle() {
let shuffle = gen_shuffle_for_pazzle_opening(9); let shuffle = gen_shuffle_for_pazzle_opening(9);
log!("{:?}", shuffle); log_info!("{:?}", shuffle);
let shuffle = gen_shuffle_for_pazzle_opening(12); let shuffle = gen_shuffle_for_pazzle_opening(12);
log!("{:?}", shuffle); log_info!("{:?}", shuffle);
let shuffle = gen_shuffle_for_pazzle_opening(15); let shuffle = gen_shuffle_for_pazzle_opening(15);
log!("{:?}", shuffle); log_info!("{:?}", shuffle);
let digits = gen_shuffle_for_pin(); let digits = gen_shuffle_for_pin();
let digits = gen_shuffle_for_pin(); let digits = gen_shuffle_for_pin();
} }
@@ -618,26 +616,26 @@ mod tests {
.await .await
.expect("create_wallet_v0"); .expect("create_wallet_v0");
log!( log_info!(
"creation of wallet took: {} ms", "creation of wallet took: {} ms",
creation.elapsed().as_millis() creation.elapsed().as_millis()
); );
log!("-----------------------------"); log_info!("-----------------------------");
let mut file = File::create("tests/wallet.ngw").expect("open wallet write file"); let mut file = File::create("tests/wallet.ngw").expect("open wallet write file");
let ser_wallet = to_vec(&NgFile::V0(NgFileV0::Wallet(res.wallet.clone()))).unwrap(); let ser_wallet = to_vec(&NgFile::V0(NgFileV0::Wallet(res.wallet.clone()))).unwrap();
file.write_all(&ser_wallet); file.write_all(&ser_wallet);
log!( log_info!(
"wallet id: {:?}", "wallet id: {:?}",
base64_url::encode(&res.wallet.id().slice()) base64_url::encode(&res.wallet.id().slice())
); );
log!("pazzle {:?}", display_pazzle(&res.pazzle)); log_info!("pazzle {:?}", display_pazzle(&res.pazzle));
log!("mnemonic {:?}", display_mnemonic(&res.mnemonic)); log_info!("mnemonic {:?}", display_mnemonic(&res.mnemonic));
log!("pin {:?}", pin); log_info!("pin {:?}", pin);
if let Wallet::V0(v0) = res.wallet { if let Wallet::V0(v0) = res.wallet {
log!("security text: {:?}", v0.content.security_txt); log_info!("security text: {:?}", v0.content.security_txt);
let mut file = let mut file =
File::create("tests/generated_security_image.jpg").expect("open write file"); File::create("tests/generated_security_image.jpg").expect("open write file");
@@ -658,9 +656,9 @@ mod tests {
let w = open_wallet_with_mnemonic(Wallet::V0(v0.clone()), res.mnemonic, pin) let w = open_wallet_with_mnemonic(Wallet::V0(v0.clone()), res.mnemonic, pin)
.expect("open with mnemonic"); .expect("open with mnemonic");
//println!("encrypted part {:?}", w); //log_debug!("encrypted part {:?}", w);
log!( log_info!(
"opening of wallet with mnemonic took: {} ms", "opening of wallet with mnemonic took: {} ms",
opening_mnemonic.elapsed().as_millis() opening_mnemonic.elapsed().as_millis()
); );
@ -669,12 +667,12 @@ mod tests {
let opening_pazzle = Instant::now(); let opening_pazzle = Instant::now();
let w = open_wallet_with_pazzle(Wallet::V0(v0.clone()), res.pazzle, pin) let w = open_wallet_with_pazzle(Wallet::V0(v0.clone()), res.pazzle, pin)
.expect("open with pazzle"); .expect("open with pazzle");
log!( log_info!(
"opening of wallet with pazzle took: {} ms", "opening of wallet with pazzle took: {} ms",
opening_pazzle.elapsed().as_millis() opening_pazzle.elapsed().as_millis()
); );
} }
//println!("encrypted part {:?}", w); //log_debug!("encrypted part {:?}", w);
} }
} }
} }

@ -12,7 +12,7 @@ use std::fmt;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_big_array::BigArray; use serde_big_array::BigArray;
use p2p_net::types::NetAddr; use p2p_net::types::{BrokerServerV0, NetAddr};
use p2p_repo::types::*; use p2p_repo::types::*;
/// WalletId is a PubKey /// WalletId is a PubKey
@ -21,31 +21,11 @@ pub type WalletId = PubKey;
/// BootstrapId is a WalletId /// BootstrapId is a WalletId
pub type BootstrapId = WalletId; pub type BootstrapId = WalletId;
/// BootstrapServer type
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub enum BoostrapServerTypeV0 {
Localhost(u16), // optional port number
BoxPrivate(Vec<NetAddr>),
BoxPublic(Vec<NetAddr>),
BoxPublicDyn(Vec<NetAddr>), // can be empty
Domain(String), // accepts an option trailing ":port" number
}
/// BootstrapServer details Version 0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BootstrapServerV0 {
/// Network addresses
pub server_type: BoostrapServerTypeV0,
/// peerId of the server
pub peer_id: PubKey,
}
/// Bootstrap content Version 0 /// Bootstrap content Version 0
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BootstrapContentV0 { pub struct BootstrapContentV0 {
/// list of servers, in order of preference /// list of servers, in order of preference
pub servers: Vec<BootstrapServerV0>, pub servers: Vec<BrokerServerV0>,
} }
/// Bootstrap Version 0 /// Bootstrap Version 0

@ -8,7 +8,6 @@ description = "CLI command-line interpreter of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs" repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies] [dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" } p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" } p2p-net = { path = "../p2p-net" }
p2p-client-ws = { path = "../p2p-client-ws" } p2p-client-ws = { path = "../p2p-client-ws" }

@ -9,21 +9,21 @@
// notice may not be copied, modified, or distributed except // notice may not be copied, modified, or distributed except
// according to those terms. // according to those terms.
use debug_print::*;
use ed25519_dalek::*; use ed25519_dalek::*;
use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership}; use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership};
use futures::{future, pin_mut, stream, SinkExt, StreamExt}; use futures::{future, pin_mut, stream, SinkExt, StreamExt};
use p2p_broker::broker_store::config::ConfigMode; use p2p_broker::broker_store::config::ConfigMode;
use p2p_repo::object::Object; use p2p_repo::object::Object;
use p2p_repo::store::{store_max_value_size, store_valid_value_size, HashMapRepoStore, RepoStore}; use p2p_repo::store::{store_max_value_size, store_valid_value_size, HashMapRepoStore, RepoStore};
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use rand::rngs::OsRng; use rand::rngs::OsRng;
use std::collections::HashMap; use std::collections::HashMap;
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use p2p_net::errors::*; use p2p_net::errors::*;
use p2p_net::types::*; use p2p_net::types::*;
use p2p_repo::log::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp}; use p2p_repo::utils::{generate_keypair, now_timestamp};
@ -50,9 +50,9 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
repo_pubkey, repo_pubkey,
repo_secret, repo_secret,
); );
//println!(">>> add_obj"); //log_debug!(">>> add_obj");
println!(" id: {}", obj.id()); log_debug!(" id: {}", obj.id());
//println!(" deps: {:?}", obj.deps()); //log_debug!(" deps: {:?}", obj.deps());
obj.save(store).unwrap(); obj.save(store).unwrap();
obj.reference().unwrap() obj.reference().unwrap()
} }
@ -94,7 +94,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
expiry, expiry,
) )
.unwrap(); .unwrap();
//println!("commit: {}", commit.id().unwrap()); //log_debug!("commit: {}", commit.id().unwrap());
add_obj( add_obj(
ObjectContent::Commit(commit), ObjectContent::Commit(commit),
obj_deps, obj_deps,
@ -114,7 +114,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
let deps = vec![]; let deps = vec![];
let expiry = None; let expiry = None;
let body = CommitBody::Branch(branch); let body = CommitBody::Branch(branch);
//println!("body: {:?}", body); //log_debug!("body: {:?}", body);
add_obj( add_obj(
ObjectContent::CommitBody(body), ObjectContent::CommitBody(body),
deps, deps,
@ -134,7 +134,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
let expiry = None; let expiry = None;
let content = [7u8; 777].to_vec(); let content = [7u8; 777].to_vec();
let body = CommitBody::Transaction(Transaction::V0(content)); let body = CommitBody::Transaction(Transaction::V0(content));
//println!("body: {:?}", body); //log_debug!("body: {:?}", body);
add_obj( add_obj(
ObjectContent::CommitBody(body), ObjectContent::CommitBody(body),
deps, deps,
@ -153,7 +153,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
) -> ObjectRef { ) -> ObjectRef {
let expiry = None; let expiry = None;
let body = CommitBody::Ack(Ack::V0()); let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body); //log_debug!("body: {:?}", body);
add_obj( add_obj(
ObjectContent::CommitBody(body), ObjectContent::CommitBody(body),
deps, deps,
@ -170,12 +170,12 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// repo // repo
let repo_keypair: Keypair = Keypair::generate(&mut rng); let repo_keypair: Keypair = Keypair::generate(&mut rng);
// println!( // log_debug!(
// "repo private key: ({}) {:?}", // "repo private key: ({}) {:?}",
// repo_keypair.secret.as_bytes().len(), // repo_keypair.secret.as_bytes().len(),
// repo_keypair.secret.as_bytes() // repo_keypair.secret.as_bytes()
// ); // );
// println!( // log_debug!(
// "repo public key: ({}) {:?}", // "repo public key: ({}) {:?}",
// repo_keypair.public.as_bytes().len(), // repo_keypair.public.as_bytes().len(),
// repo_keypair.public.as_bytes() // repo_keypair.public.as_bytes()
@ -193,11 +193,11 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// branch // branch
let branch_keypair: Keypair = Keypair::generate(&mut rng); let branch_keypair: Keypair = Keypair::generate(&mut rng);
//println!("branch public key: {:?}", branch_keypair.public.as_bytes()); //log_debug!("branch public key: {:?}", branch_keypair.public.as_bytes());
let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes()); let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes());
let member_keypair: Keypair = Keypair::generate(&mut rng); let member_keypair: Keypair = Keypair::generate(&mut rng);
//println!("member public key: {:?}", member_keypair.public.as_bytes()); //log_debug!("member public key: {:?}", member_keypair.public.as_bytes());
let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes()); let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes());
let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes()); let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes());
@ -221,18 +221,18 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
tags, tags,
metadata, metadata,
); );
//println!("branch: {:?}", branch); //log_debug!("branch: {:?}", branch);
println!("branch deps/acks:"); log_debug!("branch deps/acks:");
println!(""); log_debug!("");
println!(" br"); log_debug!(" br");
println!(" / \\"); log_debug!(" / \\");
println!(" t1 t2"); log_debug!(" t1 t2");
println!(" / \\ / \\"); log_debug!(" / \\ / \\");
println!(" a3 t4<--t5-->(t1)"); log_debug!(" a3 t4<--t5-->(t1)");
println!(" / \\"); log_debug!(" / \\");
println!(" a6 a7"); log_debug!(" a6 a7");
println!(""); log_debug!("");
// commit bodies // commit bodies
@ -247,7 +247,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// create & add commits to store // create & add commits to store
println!(">> br"); log_debug!(">> br");
let br = add_commit( let br = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -261,7 +261,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
println!(">> t1"); log_debug!(">> t1");
let t1 = add_commit( let t1 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -275,7 +275,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
println!(">> t2"); log_debug!(">> t2");
let t2 = add_commit( let t2 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -289,7 +289,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
println!(">> a3"); log_debug!(">> a3");
let a3 = add_commit( let a3 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -303,7 +303,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
println!(">> t4"); log_debug!(">> t4");
let t4 = add_commit( let t4 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -317,7 +317,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
println!(">> t5"); log_debug!(">> t5");
let t5 = add_commit( let t5 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -331,7 +331,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
println!(">> a6"); log_debug!(">> a6");
let a6 = add_commit( let a6 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -345,7 +345,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
println!(">> a7"); log_debug!(">> a7");
let a7 = add_commit( let a7 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -366,7 +366,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
// Sending everything to the broker // Sending everything to the broker
for (v) in store.get_all() { for (v) in store.get_all() {
//debug_println!("SENDING {}", k); //log_debug!("SENDING {}", k);
let _ = public_overlay_cnx let _ = public_overlay_cnx
.put_block(&v) .put_block(&v)
.await .await
@ -403,7 +403,7 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
&mut store, &mut store,
); );
debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len()); log_debug!("LOCAL STORE HAS {} BLOCKS", store.get_len());
// Let's pretend that we know that the head of the branch in the broker is at commits a6 and a7. // Let's pretend that we know that the head of the branch in the broker is at commits a6 and a7.
// normally it would be the pub/sub that notifies us of those heads. // normally it would be the pub/sub that notifies us of those heads.
@ -433,14 +433,14 @@ async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpr
let mut i = 0; let mut i = 0;
while let Some(b) = synced_blocks_stream.next().await { while let Some(b) = synced_blocks_stream.next().await {
debug_println!("GOT BLOCK {}", b.id()); log_debug!("GOT BLOCK {}", b.id());
store.put(&b); store.put(&b);
i += 1; i += 1;
} }
debug_println!("SYNCED {} BLOCKS", i); log_debug!("SYNCED {} BLOCKS", i);
debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len()); log_debug!("LOCAL STORE HAS {} BLOCKS", store.get_len());
// now the client can verify the DAG and each commit. Then update its list of heads. // now the client can verify the DAG and each commit. Then update its list of heads.
} }
@ -471,7 +471,7 @@ async fn test(
}); });
let mut public_overlay_cnx = cnx.overlay_connect(&repo, true).await?; let mut public_overlay_cnx = cnx.overlay_connect(&repo, true).await?;
debug_println!("put_block"); log_debug!("put_block");
let my_block_id = public_overlay_cnx let my_block_id = public_overlay_cnx
.put_block(&Block::new( .put_block(&Block::new(
@ -483,7 +483,7 @@ async fn test(
)) ))
.await?; .await?;
debug_println!("added block_id to store {}", my_block_id); log_debug!("added block_id to store {}", my_block_id);
let object_id = public_overlay_cnx let object_id = public_overlay_cnx
.put_object( .put_object(
@ -500,7 +500,7 @@ async fn test(
) )
.await?; .await?;
debug_println!("added object_id to store {}", object_id); log_debug!("added object_id to store {}", object_id);
let mut my_block_stream = public_overlay_cnx let mut my_block_stream = public_overlay_cnx
.get_block(my_block_id, true, None) .get_block(my_block_id, true, None)
@ -508,27 +508,27 @@ async fn test(
//.expect("get_block failed"); //.expect("get_block failed");
while let Some(b) = my_block_stream.next().await { while let Some(b) = my_block_stream.next().await {
debug_println!("GOT BLOCK {}", b.id()); log_debug!("GOT BLOCK {}", b.id());
} }
let mut my_object_stream = public_overlay_cnx.get_block(object_id, true, None).await?; let mut my_object_stream = public_overlay_cnx.get_block(object_id, true, None).await?;
//.expect("get_block for object failed"); //.expect("get_block for object failed");
while let Some(b) = my_object_stream.next().await { while let Some(b) = my_object_stream.next().await {
debug_println!("GOT BLOCK {}", b.id()); log_debug!("GOT BLOCK {}", b.id());
} }
let object = public_overlay_cnx.get_object(object_id, None).await?; let object = public_overlay_cnx.get_object(object_id, None).await?;
//.expect("get_object failed"); //.expect("get_object failed");
debug_println!("GOT OBJECT with ID {}", object.id()); log_debug!("GOT OBJECT with ID {}", object.id());
// let object_id = public_overlay_cnx // let object_id = public_overlay_cnx
// .copy_object(object_id, Some(now_timestamp() + 60)) // .copy_object(object_id, Some(now_timestamp() + 60))
// .await // .await
// .expect("copy_object failed"); // .expect("copy_object failed");
// debug_println!("COPIED OBJECT to OBJECT ID {}", object_id); // log_debug!("COPIED OBJECT to OBJECT ID {}", object_id);
public_overlay_cnx.delete_object(object_id).await?; public_overlay_cnx.delete_object(object_id).await?;
//.expect("delete_object failed"); //.expect("delete_object failed");
@ -538,7 +538,7 @@ async fn test(
.await .await
.unwrap_err(); .unwrap_err();
debug_println!("result from get object after delete: {}", res); log_debug!("result from get object after delete: {}", res);
assert_eq!(res, ProtocolError::NotFound); assert_eq!(res, ProtocolError::NotFound);
//TODO test pin/unpin //TODO test pin/unpin
@ -551,12 +551,12 @@ async fn test(
} }
async fn test_local_connection() { async fn test_local_connection() {
debug_println!("===== TESTING LOCAL API ====="); log_debug!("===== TESTING LOCAL API =====");
let root = tempfile::Builder::new().prefix("ngcli").tempdir().unwrap(); let root = tempfile::Builder::new().prefix("ngcli").tempdir().unwrap();
let master_key: [u8; 32] = [0; 32]; let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap(); std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let store = LmdbKCVStore::open(root.path(), master_key); let store = LmdbKCVStore::open(root.path(), master_key);
//let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker"); //let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker");
@ -569,7 +569,7 @@ async fn test_local_connection() {
} }
async fn test_remote_connection(url: &str) { async fn test_remote_connection(url: &str) {
debug_println!("===== TESTING REMOTE API ====="); log_debug!("===== TESTING REMOTE API =====");
let (priv_key, pub_key) = generate_keypair(); let (priv_key, pub_key) = generate_keypair();
@ -580,7 +580,7 @@ async fn test_remote_connection(url: &str) {
#[async_std::main] #[async_std::main]
async fn main() -> std::io::Result<()> { async fn main() -> std::io::Result<()> {
debug_println!("Starting nextgraph CLI..."); log_debug!("Starting nextgraph CLI...");
//test_local_connection().await; //test_local_connection().await;
@ -606,12 +606,12 @@ mod test {
#[async_std::test] #[async_std::test]
pub async fn test_remote_cnx() -> Result<(), Box<dyn std::error::Error>> { pub async fn test_remote_cnx() -> Result<(), Box<dyn std::error::Error>> {
let keys = gen_keys(); let keys = gen_keys();
// println!("Public key of node: {:?}", keys.1); // log_debug!("Public key of node: {:?}", keys.1);
// println!("Private key of node: {:?}", keys.0.as_slice()); // log_debug!("Private key of node: {:?}", keys.0.as_slice());
let pubkey = PubKey::Ed25519PubKey(keys.1); let pubkey = PubKey::Ed25519PubKey(keys.1);
println!("Public key of node: {:?}", pubkey); log_debug!("Public key of node: {:?}", pubkey);
println!("Private key of node: {:?}", keys.0.as_slice()); log_debug!("Private key of node: {:?}", keys.0.as_slice());
let thr = task::spawn(run_server_accept_one( let thr = task::spawn(run_server_accept_one(
"127.0.0.1", "127.0.0.1",

@ -8,7 +8,16 @@ description = "Daemon of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs" repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies] [dependencies]
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"
p2p-broker = { path = "../p2p-broker" } p2p-broker = { path = "../p2p-broker" }
p2p-net = { path = "../p2p-net" } p2p-net = { path = "../p2p-net" }
p2p-repo = { path = "../p2p-repo" } p2p-repo = { path = "../p2p-repo", features = ["server_log_output"] }
async-std = { version = "1.12.0", features = ["attributes"] } async-std = { version = "1.12.0", features = ["attributes"] }
default-net = "0.15"
log = "0.4"
env_logger = "0.10"
clap = { version = "4.3.4", features = ["derive","env"] }
base64-url = "2.0.0"
slice_as_array = "1.1.0"

@ -0,0 +1,34 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use clap::Parser;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub(crate) struct Cli {
/// List all network interfaces available on the host
#[arg(short('i'), long)]
pub list_interfaces: bool,
/// Increase the logging output. once : info, twice : debug, 3 times : trace
#[arg(short, long, action = clap::ArgAction::Count)]
pub verbose: u8,
/// Base path for server home folder containing all persistent files
#[arg(short, long, default_value = ".ng")]
pub base: String,
/// Master key of the server. Should be a base64-url encoded serde serialization of a [u8; 32]. if not provided, a new key will be generated for you
#[arg(short, long, env = "NG_SERVER_KEY")]
pub key: Option<String>,
/// Saves to disk the provided or automatically generated key. Only used if file storage is secure. Alternatives are passing the key at every start with --key or NG_SERVER_KEY env var.
#[arg(long)]
pub save_key: bool,
}

@ -6,18 +6,239 @@
// at your option. All files in the project carrying such // at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except // notice may not be copied, modified, or distributed except
// according to those terms. // according to those terms.
#[macro_use]
extern crate slice_as_array;
pub mod types;
mod cli;
use crate::cli::*;
use clap::Parser;
use p2p_broker::server_ws::run_server; use p2p_broker::server_ws::run_server;
use p2p_net::utils::{gen_keys, Dual25519Keys, Sensitive, U8Array}; use p2p_broker::utils::*;
use p2p_net::utils::{gen_keys, keys_from_bytes, Dual25519Keys, Sensitive, U8Array};
use p2p_net::WS_PORT; use p2p_net::WS_PORT;
use p2p_repo::log::*;
use p2p_repo::{ use p2p_repo::{
types::{PrivKey, PubKey}, types::{PrivKey, PubKey},
utils::{generate_keypair, keypair_from_ed, sign, verify}, utils::{generate_keypair, keypair_from_ed, sign, verify},
}; };
use std::fs::{read_to_string, write};
use std::io::Read;
use std::io::Write;
use std::io::{BufReader, ErrorKind};
use std::path::{Path, PathBuf};
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum InterfaceType {
Loopback,
Private,
Public,
Invalid,
}
pub fn print_ipv4(ip: &default_net::ip::Ipv4Net) -> String {
format!("{}/{}", ip.addr, ip.prefix_len)
}
pub fn print_ipv6(ip: &default_net::ip::Ipv6Net) -> String {
format!("{}/{}", ip.addr, ip.prefix_len)
}
pub struct Interface {
pub if_type: InterfaceType,
pub name: String,
pub mac_addr: Option<default_net::interface::MacAddr>,
/// List of Ipv4Net for the network interface
pub ipv4: Vec<default_net::ip::Ipv4Net>,
/// List of Ipv6Net for the network interface
pub ipv6: Vec<default_net::ip::Ipv6Net>,
}
pub fn get_interface() -> Vec<Interface> {
let mut res: Vec<Interface> = vec![];
let interfaces = default_net::get_interfaces();
for interface in interfaces {
if interface.ipv4.len() > 0 {
let first_v4 = interface.ipv4[0].addr;
let if_type = if first_v4.is_loopback() {
InterfaceType::Loopback
} else if first_v4.is_private() || first_v4.is_link_local() {
InterfaceType::Private
} else if !first_v4.is_unspecified()
&& !first_v4.is_documentation()
&& !first_v4.is_broadcast()
&& !first_v4.is_multicast()
{
InterfaceType::Public
} else {
InterfaceType::Invalid
};
if if_type == InterfaceType::Invalid {
continue;
}
let interf = Interface {
if_type,
name: interface.name,
mac_addr: interface.mac_addr,
ipv4: interface.ipv4,
ipv6: interface.ipv6,
};
res.push(interf);
}
}
res
}
pub fn print_interfaces() {
let interfaces = get_interface();
for interface in interfaces {
println!("{} \t{:?}", interface.name, interface.if_type);
println!(
"\tIPv4: {}",
interface
.ipv4
.iter()
.map(|ip| print_ipv4(ip))
.collect::<Vec<String>>()
.join(" ")
);
println!(
"\tIPv6: {}",
interface
.ipv6
.iter()
.map(|ip| print_ipv6(ip))
.collect::<Vec<String>>()
.join(" ")
);
if let Some(mac_addr) = interface.mac_addr {
println!("\tMAC: {}", mac_addr);
}
}
}
fn decode_key(key_string: String) -> Result<[u8; 32], ()> {
let vec = base64_url::decode(&key_string).map_err(|_| log_err!("key has invalid content"))?;
Ok(*slice_as_array!(&vec, [u8; 32])
.ok_or(())
.map_err(|_| log_err!("key has invalid content array"))?)
}
#[async_std::main] #[async_std::main]
async fn main() -> std::io::Result<()> { async fn main() -> std::io::Result<()> {
println!("Starting NextGraph daemon..."); let args = Cli::parse();
if args.list_interfaces {
println!("list of network interfaces");
print_interfaces();
return Ok(());
}
if std::env::var("RUST_LOG").is_err() {
if args.verbose == 0 {
std::env::set_var("RUST_LOG", "warn");
} else if args.verbose == 1 {
std::env::set_var("RUST_LOG", "info");
} else if args.verbose == 2 {
std::env::set_var("RUST_LOG", "debug");
} else if args.verbose >= 3 {
std::env::set_var("RUST_LOG", "trace");
}
}
env_logger::init();
log_info!("Starting NextGraph daemon (ngd)");
log_debug!("base {:?}", args.base);
let mut path = PathBuf::from(&args.base);
path.push("server");
if !path.is_absolute() {
path = std::env::current_dir().unwrap().join(path);
}
log_debug!("cur {}", std::env::current_dir().unwrap().display());
log_debug!("home {}", path.to_str().unwrap());
std::fs::create_dir_all(path.clone()).unwrap();
// reading key from file, if any
let mut key_path = path.clone();
key_path.push("key");
let key_from_file: Option<[u8; 32]>;
let res = |key_path| -> Result<[u8; 32], &str> {
let file = read_to_string(key_path).map_err(|_| "")?;
decode_key(
file.lines()
.nth(0)
.ok_or("empty file")?
.to_string()
.trim()
.to_string(),
)
.map_err(|_| "invalid file")
}(&key_path);
if res.is_err() && res.unwrap_err().len() > 0 {
log_err!(
"provided key file is incorrect. {}. aborting start",
res.unwrap_err()
);
return Err(ErrorKind::InvalidInput.into());
}
key_from_file = match res {
Err(_) => None,
Ok(k) => Some(k),
};
let keys: [[u8; 32]; 4] = match args.key {
Some(key_string) => {
if key_from_file.is_some() {
log_err!("provided key option will not be used as a key file is already present");
gen_broker_keys(Some(key_from_file.unwrap()))
} else {
let res = decode_key(key_string);
if res.is_err() {
log_err!("provided key is invalid. cannot start");
return Err(ErrorKind::InvalidInput.into());
}
if args.save_key {
let master_key = base64_url::encode(&res.unwrap());
if let Err(e) = write(key_path.clone(), master_key) {
log_err!("cannot save key to file. aborting start");
return Err(e);
}
log_info!("The key has been saved to {}", key_path.to_str().unwrap());
}
gen_broker_keys(Some(res.unwrap()))
}
}
None => {
if key_from_file.is_some() {
gen_broker_keys(Some(key_from_file.unwrap()))
} else {
let res = gen_broker_keys(None);
let master_key = base64_url::encode(&res[0]);
if args.save_key {
if let Err(e) = write(key_path.clone(), master_key) {
log_err!("cannot save key to file. aborting start");
return Err(e);
}
log_info!("The key has been saved to {}", key_path.to_str().unwrap());
} else {
// on purpose we don't log the key, just print it out stdout, as it should be saved in logger's files
println!("YOUR GENERATED KEY IS: {}", master_key);
log_err!("At your request, the key wasn't saved.");
log_err!("provide it again to the next start of ngd with --key option or NG_SERVER_KEY env variable");
}
res
}
}
};
println!("{:?}", keys);
// let keys = gen_keys(); // let keys = gen_keys();
// let pub_key = PubKey::Ed25519PubKey(keys.1); // let pub_key = PubKey::Ed25519PubKey(keys.1);
@ -32,20 +253,23 @@ async fn main() -> std::io::Result<()> {
// let privkey = duals.x25519_priv; // let privkey = duals.x25519_priv;
// let pubkey = PubKey::Ed25519PubKey(duals.x25519_public); // let pubkey = PubKey::Ed25519PubKey(duals.x25519_public);
// println!("Public key of node: {:?}", keys.1); // log_debug!("Public key of node: {:?}", keys.1);
// println!("Private key of node: {:?}", keys.0.as_slice()); // log_debug!("Private key of node: {:?}", keys.0.as_slice());
let pubkey = PubKey::Ed25519PubKey([
95, 155, 249, 202, 41, 105, 71, 51, 206, 126, 9, 84, 132, 92, 60, 7, 74, 179, 46, 21, 21, let (privkey, pubkey) = keys_from_bytes(keys[1]);
242, 171, 27, 249, 79, 76, 176, 168, 43, 83, 2,
]); // let pubkey = PubKey::Ed25519PubKey([
let privkey = Sensitive::<[u8; 32]>::from_slice(&[ // 95, 155, 249, 202, 41, 105, 71, 51, 206, 126, 9, 84, 132, 92, 60, 7, 74, 179, 46, 21, 21,
56, 86, 36, 0, 109, 59, 152, 66, 166, 71, 201, 20, 119, 64, 173, 99, 215, 52, 40, 189, 96, // 242, 171, 27, 249, 79, 76, 176, 168, 43, 83, 2,
142, 3, 134, 167, 187, 235, 4, 39, 26, 31, 119, // ]);
]); // let privkey = Sensitive::<[u8; 32]>::from_slice(&[
// 56, 86, 36, 0, 109, 59, 152, 66, 166, 71, 201, 20, 119, 64, 173, 99, 215, 52, 40, 189, 96,
println!("Public key of node: {:?}", pubkey); // 142, 3, 134, 167, 187, 235, 4, 39, 26, 31, 119,
println!("Private key of node: {:?}", privkey.as_slice()); // ]);
run_server("127.0.0.1", WS_PORT, privkey, pubkey).await?;
log_debug!("Public key of node: {:?}", pubkey);
log_debug!("Private key of node: {:?}", privkey.as_slice());
run_server("127.0.0.1", WS_PORT, privkey, pubkey, path).await?;
Ok(()) Ok(())
} }

@ -0,0 +1,27 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_broker::types::BrokerOverlayConfig;
use p2p_broker::types::ListenerV0;
use p2p_repo::types::PrivKey;
use serde::{Deserialize, Serialize};
/// DaemonConfig Version 0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DaemonConfigV0 {
/// List of listeners for TCP (HTTP) incoming connections
pub listeners: Vec<ListenerV0>,
pub overlays_config: BrokerOverlayConfig,
}
/// Daemon config
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum DaemonConfig {
V0(DaemonConfigV0),
}

@ -15,7 +15,7 @@ rust-embed = "6"
log = "0.4" log = "0.4"
env_logger = "0.10" env_logger = "0.10"
stores-lmdb = { path = "../stores-lmdb" } stores-lmdb = { path = "../stores-lmdb" }
p2p-repo = { path = "../p2p-repo" } p2p-repo = { path = "../p2p-repo", features = ["server_log_output"] }
p2p-net = { path = "../p2p-net" } p2p-net = { path = "../p2p-net" }
ng-wallet = { path = "../ng-wallet" } ng-wallet = { path = "../ng-wallet" }
serde = { version = "1.0.142", features = ["derive"] } serde = { version = "1.0.142", features = ["derive"] }
@ -25,5 +25,4 @@ serde-big-array = "0.5.1"
base64-url = "2.0.0" base64-url = "2.0.0"
slice_as_array = "1.1.0" slice_as_array = "1.1.0"
serde_json = "1.0.96" serde_json = "1.0.96"
debug_print = "1.0.0"
bytes = "1.0" bytes = "1.0"

@ -16,7 +16,6 @@ use p2p_repo::store::StorageError;
use warp::reply::Response; use warp::reply::Response;
use warp::{Filter, Reply}; use warp::{Filter, Reply};
use debug_print::debug_println;
use rust_embed::RustEmbed; use rust_embed::RustEmbed;
use serde_bare::{from_slice, to_vec}; use serde_bare::{from_slice, to_vec};
use serde_json::json; use serde_json::json;
@ -26,6 +25,7 @@ use std::{env, fs};
use crate::store::wallet_record::*; use crate::store::wallet_record::*;
use crate::types::*; use crate::types::*;
use ng_wallet::types::*; use ng_wallet::types::*;
use p2p_repo::log::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, sign, verify}; use p2p_repo::utils::{generate_keypair, sign, verify};
use stores_lmdb::kcv_store::LmdbKCVStore; use stores_lmdb::kcv_store::LmdbKCVStore;
@ -44,7 +44,7 @@ impl Server {
let bootstrap = add_wallet.bootstrap(); let bootstrap = add_wallet.bootstrap();
debug_println!("ADDING wallet {}", bootstrap.id()); log_debug!("ADDING wallet {}", bootstrap.id());
verify( verify(
&bootstrap.content_as_bytes(), &bootstrap.content_as_bytes(),
@ -85,7 +85,7 @@ impl Server {
} }
fn get_wallet(&self, encoded_id: String) -> Result<Response, NgHttpError> { fn get_wallet(&self, encoded_id: String) -> Result<Response, NgHttpError> {
debug_println!("DOWNLOAD wallet {}", encoded_id); log_debug!("DOWNLOAD wallet {}", encoded_id);
let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?; let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?;
let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?; let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?;
let wallet_id = PubKey::Ed25519PubKey(*array); let wallet_id = PubKey::Ed25519PubKey(*array);
@ -104,7 +104,7 @@ impl Server {
} }
fn get_bootstrap(&self, encoded_id: String) -> Result<Response, NgHttpError> { fn get_bootstrap(&self, encoded_id: String) -> Result<Response, NgHttpError> {
debug_println!("DOWNLOAD bootstrap {}", encoded_id); log_debug!("DOWNLOAD bootstrap {}", encoded_id);
let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?; let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?;
let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?; let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?;
@ -127,7 +127,7 @@ impl Server {
// pub fn create_wallet_record(&self, bootstrap: &Bootstrap) { // pub fn create_wallet_record(&self, bootstrap: &Bootstrap) {
// let wallet = WalletRecord::create(&bootstrap.id(), bootstrap, &self.store).unwrap(); // let wallet = WalletRecord::create(&bootstrap.id(), bootstrap, &self.store).unwrap();
// println!( // log_debug!(
// "wallet created {}", // "wallet created {}",
// base64_url::encode(&wallet.id().slice()) // base64_url::encode(&wallet.id().slice())
// ); // );
@ -135,7 +135,7 @@ impl Server {
// pub fn open_wallet_record(&self, wallet_id: &WalletId) -> WalletRecord { // pub fn open_wallet_record(&self, wallet_id: &WalletId) -> WalletRecord {
// let wallet2 = WalletRecord::open(wallet_id, &self.store).unwrap(); // let wallet2 = WalletRecord::open(wallet_id, &self.store).unwrap();
// println!( // log_debug!(
// "wallet opened {}", // "wallet opened {}",
// base64_url::encode(&wallet2.id().slice()) // base64_url::encode(&wallet2.id().slice())
// ); // );
@ -155,7 +155,7 @@ async fn main() {
dir.push(path_str); dir.push(path_str);
// FIXME: use a real key for encryption at rest // FIXME: use a real key for encryption at rest
let key: [u8; 32] = [0; 32]; let key: [u8; 32] = [0; 32];
println!("{}", dir.to_str().unwrap()); log_debug!("data directory: {}", dir.to_str().unwrap());
fs::create_dir_all(dir.clone()).unwrap(); fs::create_dir_all(dir.clone()).unwrap();
let store = LmdbKCVStore::open(&dir, key); let store = LmdbKCVStore::open(&dir, key);
@ -209,7 +209,7 @@ async fn main() {
} }
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
{ {
println!("ANY ORIGIN"); log_debug!("CORS: any origin");
cors = cors.allow_any_origin(); cors = cors.allow_any_origin();
} }
log::info!("Starting server on http://localhost:3030"); log::info!("Starting server on http://localhost:3030");

@ -8,7 +8,6 @@ description = "P2P Broker module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs" repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies] [dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" } p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" } p2p-net = { path = "../p2p-net" }
p2p-client-ws = { path = "../p2p-client-ws" } p2p-client-ws = { path = "../p2p-client-ws" }
@ -20,9 +19,16 @@ serde_bytes = "0.11.7"
async-std = { version = "1.12.0", features = ["attributes"] } async-std = { version = "1.12.0", features = ["attributes"] }
futures = "0.3.24" futures = "0.3.24"
rust-fsm = "0.6.0" rust-fsm = "0.6.0"
getrandom = "0.2.7"
async-channel = "1.7.1" async-channel = "1.7.1"
tempfile = "3" tempfile = "3"
hex = "0.4.3" hex = "0.4.3"
async-trait = "0.1.64" async-trait = "0.1.64"
async-tungstenite = { version = "0.17.2", features = ["async-std-runtime"] } async-tungstenite = { version = "0.17.2", features = ["async-std-runtime"] }
blake3 = "1.3.1"
[target.'cfg(target_arch = "wasm32")'.dependencies.getrandom]
version = "0.2.7"
features = ["js"]
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
getrandom = "0.2.7"

@ -1,3 +1,7 @@
pub mod broker_store; pub mod broker_store;
pub mod server_ws; pub mod server_ws;
pub mod types;
pub mod utils;

@ -23,7 +23,6 @@ use crate::broker_store::overlay::Overlay;
use crate::broker_store::peer::Peer; use crate::broker_store::peer::Peer;
use crate::broker_store::repostoreinfo::RepoStoreInfo; use crate::broker_store::repostoreinfo::RepoStoreInfo;
use async_std::task; use async_std::task;
use debug_print::*;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use futures::future::OptionFuture; use futures::future::OptionFuture;
use futures::FutureExt; use futures::FutureExt;
@ -31,6 +30,7 @@ use futures::Stream;
use p2p_net::actors::*; use p2p_net::actors::*;
use p2p_net::errors::*; use p2p_net::errors::*;
use p2p_net::types::*; use p2p_net::types::*;
use p2p_repo::log::*;
use p2p_repo::object::Object; use p2p_repo::object::Object;
use p2p_repo::store::RepoStore; use p2p_repo::store::RepoStore;
use p2p_repo::store::StorageError; use p2p_repo::store::StorageError;
@ -119,7 +119,7 @@ impl ProtocolHandler {
Result<Vec<u8>, ProtocolError>, Result<Vec<u8>, ProtocolError>,
OptionFuture<BoxFuture<'static, u16>>, OptionFuture<BoxFuture<'static, u16>>,
) { ) {
//debug_println!("SERVER PROTOCOL {:?}", &self.protocol); //log_debug!("SERVER PROTOCOL {:?}", &self.protocol);
match &self.protocol { match &self.protocol {
ProtocolType::Start => { ProtocolType::Start => {
let message = serde_bare::from_slice::<StartProtocol>(&frame); let message = serde_bare::from_slice::<StartProtocol>(&frame);
@ -636,9 +636,9 @@ impl BrokerServer {
// // we need to open/create it // // we need to open/create it
// // first let's find it in the KCVStore.overlay table to retrieve its repo_pubkey // // first let's find it in the KCVStore.overlay table to retrieve its repo_pubkey
// debug_println!("searching for overlayId {}", overlay_id); // log_debug!("searching for overlayId {}", overlay_id);
// let overlay = Overlay::open(overlay_id, &self.store)?; // let overlay = Overlay::open(overlay_id, &self.store)?;
// debug_println!("found overlayId {}", overlay_id); // log_debug!("found overlayId {}", overlay_id);
// let repo_id = overlay.repo()?; // let repo_id = overlay.repo()?;
// let repostore_id = RepoStoreId::Repo(repo_id); // let repostore_id = RepoStoreId::Repo(repo_id);
// let mut writer = self // let mut writer = self
@ -676,7 +676,7 @@ impl BrokerServer {
user_id: PubKey, user_id: PubKey,
sig: Sig, sig: Sig,
) -> Result<(), ProtocolError> { ) -> Result<(), ProtocolError> {
debug_println!("ADDING USER {}", user_id); log_debug!("ADDING USER {}", user_id);
// TODO add is_admin boolean // TODO add is_admin boolean
// TODO check that admin_user is indeed an admin // TODO check that admin_user is indeed an admin
@ -855,7 +855,7 @@ impl BrokerServer {
} }
// TODO use a task to send non blocking (streaming) // TODO use a task to send non blocking (streaming)
let o = obj.ok().unwrap(); let o = obj.ok().unwrap();
//debug_println!("{} BLOCKS ", o.blocks().len()); //log_debug!("{} BLOCKS ", o.blocks().len());
let mut deduplicated: HashSet<BlockId> = HashSet::new(); let mut deduplicated: HashSet<BlockId> = HashSet::new();
for block in o.blocks() { for block in o.blocks() {
let id = block.id(); let id = block.id();
@ -878,9 +878,9 @@ impl BrokerServer {
known_heads: &Vec<ObjectId>, known_heads: &Vec<ObjectId>,
known_commits: &BloomFilter, known_commits: &BloomFilter,
) -> Result<async_channel::Receiver<Block>, ProtocolError> { ) -> Result<async_channel::Receiver<Block>, ProtocolError> {
//debug_println!("heads {:?}", heads); //log_debug!("heads {:?}", heads);
//debug_println!("known_heads {:?}", known_heads); //log_debug!("known_heads {:?}", known_heads);
//debug_println!("known_commits {:?}", known_commits); //log_debug!("known_commits {:?}", known_commits);
self.get_repostore_from_overlay_id(&overlay, |store| { self.get_repostore_from_overlay_id(&overlay, |store| {
let (s, r) = async_channel::unbounded::<Block>(); let (s, r) = async_channel::unbounded::<Block>();
@ -889,7 +889,7 @@ impl BrokerServer {
.map_err(|e| ProtocolError::ObjectParseError)?; .map_err(|e| ProtocolError::ObjectParseError)?;
// todo, use a task to send non blocking (streaming) // todo, use a task to send non blocking (streaming)
debug_println!("SYNCING {} COMMITS", res.len()); log_debug!("SYNCING {} COMMITS", res.len());
let mut deduplicated: HashSet<BlockId> = HashSet::new(); let mut deduplicated: HashSet<BlockId> = HashSet::new();
@ -925,7 +925,7 @@ impl BrokerServer {
peers: &Vec<PeerAdvert>, peers: &Vec<PeerAdvert>,
) -> Result<(), ProtocolError> { ) -> Result<(), ProtocolError> {
// check if this overlay already exists // check if this overlay already exists
//debug_println!("SEARCHING OVERLAY"); //log_debug!("SEARCHING OVERLAY");
let overlay_res = Overlay::open(&overlay_id, &self.store); let overlay_res = Overlay::open(&overlay_id, &self.store);
let overlay = match overlay_res { let overlay = match overlay_res {
Err(StorageError::NotFound) => { Err(StorageError::NotFound) => {
@ -949,24 +949,24 @@ impl BrokerServer {
let key = SymKey::ChaCha20Key(random_buf); let key = SymKey::ChaCha20Key(random_buf);
let _ = RepoStoreInfo::create(&overlay_id, &key, &self.store)?; // TODO in case of error, delete the previously created Overlay let _ = RepoStoreInfo::create(&overlay_id, &key, &self.store)?; // TODO in case of error, delete the previously created Overlay
//debug_println!("KEY ADDED"); //log_debug!("KEY ADDED");
over over
} }
Err(e) => return Err(e.into()), Err(e) => return Err(e.into()),
Ok(overlay) => overlay, Ok(overlay) => overlay,
}; };
//debug_println!("OVERLAY FOUND"); //log_debug!("OVERLAY FOUND");
// add the peers to the overlay // add the peers to the overlay
for advert in peers { for advert in peers {
Peer::update_or_create(advert, &self.store)?; Peer::update_or_create(advert, &self.store)?;
overlay.add_peer(&advert.peer())?; overlay.add_peer(&advert.peer())?;
} }
//debug_println!("PEERS ADDED"); //log_debug!("PEERS ADDED");
// now adding the overlay_id to the account // now adding the overlay_id to the account
let account = Account::open(&user, &self.store)?; // TODO in case of error, delete the previously created Overlay let account = Account::open(&user, &self.store)?; // TODO in case of error, delete the previously created Overlay
account.add_overlay(&overlay_id)?; account.add_overlay(&overlay_id)?;
//debug_println!("USER <-> OVERLAY"); //log_debug!("USER <-> OVERLAY");
//TODO: connect to peers //TODO: connect to peers

@ -17,21 +17,22 @@ use async_std::sync::Mutex;
use async_std::task; use async_std::task;
use async_tungstenite::accept_async; use async_tungstenite::accept_async;
use async_tungstenite::tungstenite::protocol::Message; use async_tungstenite::tungstenite::protocol::Message;
use debug_print::*;
use futures::{SinkExt, StreamExt}; use futures::{SinkExt, StreamExt};
use p2p_client_ws::remote_ws::ConnectionWebSocket; use p2p_client_ws::remote_ws::ConnectionWebSocket;
use p2p_net::broker::*; use p2p_net::broker::*;
use p2p_net::connection::IAccept; use p2p_net::connection::IAccept;
use p2p_net::types::IP; use p2p_net::types::IP;
use p2p_net::utils::Sensitive; use p2p_net::utils::Sensitive;
use p2p_repo::log::*;
use p2p_repo::types::{PrivKey, PubKey}; use p2p_repo::types::{PrivKey, PubKey};
use p2p_repo::utils::generate_keypair; use p2p_repo::utils::generate_keypair;
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use std::fs; use std::fs;
use std::ops::Deref; use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::{thread, time}; use std::{thread, time};
use stores_lmdb::kcv_store::LmdbKCVStore;
use stores_lmdb::repo_store::LmdbRepoStore;
use tempfile::Builder; use tempfile::Builder;
pub async fn accept(tcp: TcpStream, peer_priv_key: Sensitive<[u8; 32]>, peer_pub_key: PubKey) { pub async fn accept(tcp: TcpStream, peer_priv_key: Sensitive<[u8; 32]>, peer_pub_key: PubKey) {
@ -63,7 +64,7 @@ pub async fn run_server_accept_one(
let root = tempfile::Builder::new().prefix("ngd").tempdir().unwrap(); let root = tempfile::Builder::new().prefix("ngd").tempdir().unwrap();
let master_key: [u8; 32] = [0; 32]; let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap(); std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("data directory: {}", root.path().to_str().unwrap());
let store = LmdbKCVStore::open(root.path(), master_key); let store = LmdbKCVStore::open(root.path(), master_key);
// TODO: remove this part // TODO: remove this part
@ -72,7 +73,7 @@ pub async fn run_server_accept_one(
// let server_arc = Arc::new(server); // let server_arc = Arc::new(server);
let socket = TcpListener::bind(addrs.as_str()).await?; let socket = TcpListener::bind(addrs.as_str()).await?;
debug_println!("Listening on {}", addrs.as_str()); log_debug!("Listening on {}", addrs.as_str());
let mut connections = socket.incoming(); let mut connections = socket.incoming();
let tcp = connections.next().await.unwrap()?; let tcp = connections.next().await.unwrap()?;
@ -87,13 +88,18 @@ pub async fn run_server(
port: u16, port: u16,
peer_priv_key: Sensitive<[u8; 32]>, peer_priv_key: Sensitive<[u8; 32]>,
peer_pub_key: PubKey, peer_pub_key: PubKey,
mut path: PathBuf,
) -> std::io::Result<()> { ) -> std::io::Result<()> {
let addrs = format!("{}:{}", addr, port); let addrs = format!("{}:{}", addr, port);
let root = tempfile::Builder::new().prefix("ngd").tempdir().unwrap(); //let root = tempfile::Builder::new().prefix("ngd").tempdir().unwrap();
path.push("storage");
std::fs::create_dir_all(path.clone()).unwrap();
//log::info!("Home directory is {}");
let master_key: [u8; 32] = [0; 32]; let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); let store = LmdbKCVStore::open(&path, master_key);
let store = LmdbKCVStore::open(root.path(), master_key);
// TODO: remove this part // TODO: remove this part
// let server: BrokerServer = // let server: BrokerServer =
@ -101,7 +107,7 @@ pub async fn run_server(
// let server_arc = Arc::new(server); // let server_arc = Arc::new(server);
let socket = TcpListener::bind(addrs.as_str()).await?; let socket = TcpListener::bind(addrs.as_str()).await?;
debug_println!("Listening on {}", addrs.as_str()); log_debug!("Listening on {}", addrs.as_str());
let mut connections = socket.incoming(); let mut connections = socket.incoming();
while let Some(tcp) = connections.next().await { while let Some(tcp) = connections.next().await {

@ -0,0 +1,92 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_net::types::{BindAddress, BrokerServerV0, OverlayId, UserId, IP};
use p2p_repo::types::PrivKey;
use serde::{Deserialize, Serialize};
/// AcceptForwardForV0 type
/// allow answers to connection requests originating from a client behind a reverse proxy
/// Format of last param in the tuple is a list of comma separated hosts or CIDR subnetworks IPv4 and/or IPv6 addresses accepted as X-Forwarded-For
/// Empty string means all addresses are accepted
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub enum AcceptForwardForV0 {
/// X-Forwarded-For not allowed
No,
/// X-Forwarded-For accepted only for clients with private LAN addresses. First param is the bind address of the proxy server
Private((BindAddress, String)),
/// X-Forwarded-For accepted only for clients with public addresses. First param is the domain of the proxy server
/// domain can take an option port with a trailing `:port`
PublicDomain((String, String)),
/// X-Forwarded-For accepted only for clients with public addresses. First param is the domain of the proxy server
/// domain can take an option port with a trailing `:port`
/// second param is the privKey of the PeerId of the proxy server, useful when the proxy server is load balancing to several daemons
/// that should all use the same PeerId to answer requests
PublicDomainPeer((String, PrivKey, String)),
/// X-Forwarded-For accepted for clients with public addresses, behind a proxy with a dynamic IP.
/// First param is the port, second param is the interval (in seconds, presumably — TODO confirm) for periodic probe of the external IP
PublicDyn((u16, u32, String)),
/// X-Forwarded-For accepted for clients with public addresses, behind a proxy with a static public address.
/// First param is the static bind address of the proxy server
PublicStatic((BindAddress, String)),
}
/// DaemonConfig Listener Version 0
///
/// Describes one network listener of the daemon: which interface and port to
/// bind, and which kinds of incoming connections (direct and/or forwarded
/// through a reverse proxy) are accepted on it.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ListenerV0 {
/// local interface name to bind to
/// names of interfaces can be retrieved with the --list-interfaces option
/// the string can take an optional trailing option of the form `:3600` for number of seconds
/// for an interval periodic refresh of the actual IP(s) of the interface. Used for dynamic IP interfaces.
pub interface_name: String,
/// if to bind to the ipv6 address of the interface
pub ipv6: bool,
/// local port to listen on
pub port: u16,
/// will answer a probe coming from private LAN and if is_private, with its own peerId, so that guests on the network will be able to connect.
pub discoverable: bool,
/// Answers to connection requests originating from a direct client, without X-Forwarded-For headers
/// Can be used in combination with a accept_forward_for config, when a local daemon is behind a proxy, and also serves as broker for local apps/webbrowsers
pub accept_direct: bool,
/// X-Forwarded-For config. only valid if IP/interface is localhost or private
pub accept_forward_for: AcceptForwardForV0,
// impl fn is_private()
// returns false if public IP in interface, or if PublicDyn, PublicStatic
// if the ip is local or private, and the forwarding is not PublicDyn nor PublicStatic, (if is_private) then the app is served on HTTP get of /
// an interface with no accept_forward_for and no accept_direct, is de facto, disabled
}
/// Broker Overlay Permission
///
/// Who is allowed to join/use an overlay on this broker.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum BrokerOverlayPermission {
/// Access denied to everybody
Nobody,
/// Open access, including unregistered users
Anybody,
/// Access restricted to users registered on this broker
AllRegisteredUser,
/// Access restricted to an explicit list of users
UsersList(Vec<UserId>),
}
/// Broker Overlay Config
///
/// Per-overlay access-control and forwarding configuration of the broker.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BrokerOverlayConfig {
/// list of overlays this config applies to. empty array means applying to all
pub overlays: Vec<OverlayId>,
/// Who can ask to join an overlay on the core protocol
pub core: BrokerOverlayPermission,
/// Who can connect as a client to this server
pub server: BrokerOverlayPermission,
// if core == Nobody and server == Nobody then the listeners will not be started

/// are ExtRequest allowed on the server? this requires the core to be ON.
pub allow_read: bool,

/// an empty list means to forward to the peer known for each overlay.
/// forward becomes the default when core is disabled
pub forward: Vec<BrokerServerV0>,
}

@ -0,0 +1,31 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_repo::log::*;
/// Derives the set of keys used by the broker daemon from a master key.
///
/// If `key` is `None`, a fresh random 256-bit master key is generated
/// (and a warning is logged, since the caller probably wanted to persist one).
///
/// Returns `[master, peerid, wallet, sig]`:
/// - `master`: the (provided or generated) master key itself
/// - `peerid`: privkey material for the broker's PeerId
/// - `wallet`: key for wallet encryption at rest
/// - `sig`: key for signing the daemon config
///
/// The three subkeys are derived with BLAKE3's KDF mode; the distinct
/// context strings guarantee domain separation between them.
///
/// # Panics
/// Panics if the OS random number generator fails (`getrandom`).
pub fn gen_broker_keys(key: Option<[u8; 32]>) -> [[u8; 32]; 4] {
    // Use the supplied master key, or generate a random one.
    let key = match key {
        None => {
            let mut master_key = [0u8; 32];
            log_warn!("gen_broker_keys: No key provided, generating one");
            getrandom::getrandom(&mut master_key).expect("getrandom failed");
            master_key
        }
        Some(k) => k,
    };

    // Direct initialization instead of declare-then-assign: more idiomatic
    // and lets the compiler enforce single assignment.
    let peerid = blake3::derive_key("NextGraph Broker BLAKE3 key PeerId privkey", &key);
    let wallet = blake3::derive_key("NextGraph Broker BLAKE3 key wallet encryption", &key);
    let sig = blake3::derive_key("NextGraph Broker BLAKE3 key config signature", &key);

    [key, peerid, wallet, sig]
}

@ -8,7 +8,6 @@ description = "P2P Client module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs" repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies] [dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" } p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" } p2p-net = { path = "../p2p-net" }
chacha20 = "0.9.0" chacha20 = "0.9.0"

@ -17,7 +17,7 @@ macro_rules! before {
.map_err(|_e| ProtocolError::ActorError)?; .map_err(|_e| ProtocolError::ActorError)?;
let $request_id = $addr.actor_id(); let $request_id = $addr.actor_id();
//debug_println!("actor ID {}", $request_id); //log_debug!("actor ID {}", $request_id);
{ {
let mut map = $self.actors.write().expect("RwLock poisoned"); let mut map = $self.actors.write().expect("RwLock poisoned");
@ -28,7 +28,7 @@ macro_rules! before {
macro_rules! after { macro_rules! after {
( $self:expr, $request_id:ident, $addr:ident, $receiver:ident, $reply:ident ) => { ( $self:expr, $request_id:ident, $addr:ident, $receiver:ident, $reply:ident ) => {
//debug_println!("waiting for reply"); //log_debug!("waiting for reply");
$addr.wait_for_stop().await; // TODO add timeout and close connection if there's no reply $addr.wait_for_stop().await; // TODO add timeout and close connection if there's no reply
let r = $receiver.await; let r = $receiver.await;
@ -36,7 +36,7 @@ macro_rules! after {
return Err(ProtocolError::Closing); return Err(ProtocolError::Closing);
} }
let $reply = r.unwrap(); let $reply = r.unwrap();
//debug_println!("reply arrived {:?}", $reply); //log_debug!("reply arrived {:?}", $reply);
{ {
let mut map = $self.actors.write().expect("RwLock poisoned"); let mut map = $self.actors.write().expect("RwLock poisoned");
map.remove(&$request_id); map.remove(&$request_id);

@ -17,7 +17,6 @@ use async_std::net::TcpStream;
use async_tungstenite::tungstenite::protocol::frame::coding::CloseCode; use async_tungstenite::tungstenite::protocol::frame::coding::CloseCode;
use async_tungstenite::tungstenite::protocol::CloseFrame; use async_tungstenite::tungstenite::protocol::CloseFrame;
use async_tungstenite::WebSocketStream; use async_tungstenite::WebSocketStream;
use debug_print::*;
use async_std::sync::Mutex; use async_std::sync::Mutex;
use futures::io::Close; use futures::io::Close;
@ -26,10 +25,10 @@ use futures::{FutureExt, SinkExt};
use async_std::task; use async_std::task;
use p2p_net::errors::*; use p2p_net::errors::*;
use p2p_net::log;
use p2p_net::types::*; use p2p_net::types::*;
use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend, Sender, Sensitive}; use p2p_net::utils::{spawn_and_log_error, Receiver, ResultSend, Sender, Sensitive};
use p2p_net::{connection::*, WS_PORT}; use p2p_net::{connection::*, WS_PORT};
use p2p_repo::log::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp}; use p2p_repo::utils::{generate_keypair, now_timestamp};
@ -57,7 +56,7 @@ impl IConnect for ConnectionWebSocket {
match (res) { match (res) {
Err(e) => { Err(e) => {
debug_println!("Cannot connect: {:?}", e); log_debug!("Cannot connect: {:?}", e);
Err(NetError::ConnectionError) Err(NetError::ConnectionError)
} }
Ok((mut websocket, _)) => { Ok((mut websocket, _)) => {
@ -67,14 +66,14 @@ impl IConnect for ConnectionWebSocket {
let mut shutdown = cnx.set_shutdown(); let mut shutdown = cnx.set_shutdown();
let join = task::spawn(async move { let join = task::spawn(async move {
debug_println!("START of WS loop"); log_debug!("START of WS loop");
let res = ws_loop(websocket, s, r).await; let res = ws_loop(websocket, s, r).await;
if res.is_err() { if res.is_err() {
let _ = shutdown.send(res.err().unwrap()).await; let _ = shutdown.send(res.err().unwrap()).await;
} }
debug_println!("END of WS loop"); log_debug!("END of WS loop");
}); });
cnx.start(config).await; cnx.start(config).await;
@ -103,14 +102,14 @@ impl IAccept for ConnectionWebSocket {
let mut shutdown = cnx.set_shutdown(); let mut shutdown = cnx.set_shutdown();
let join = task::spawn(async move { let join = task::spawn(async move {
debug_println!("START of WS loop"); log_debug!("START of WS loop");
let res = ws_loop(socket, s, r).await; let res = ws_loop(socket, s, r).await;
if res.is_err() { if res.is_err() {
let _ = shutdown.send(res.err().unwrap()).await; let _ = shutdown.send(res.err().unwrap()).await;
} }
debug_println!("END of WS loop"); log_debug!("END of WS loop");
}); });
Ok(cnx) Ok(cnx)
} }
@ -122,7 +121,7 @@ async fn close_ws(
code: u16, code: u16,
reason: &str, reason: &str,
) -> Result<(), NetError> { ) -> Result<(), NetError> {
log!("close_ws {:?}", code); log_info!("close_ws {:?}", code);
let cmd = if code == 1000 { let cmd = if code == 1000 {
ConnectionCommand::Close ConnectionCommand::Close
@ -133,7 +132,7 @@ async fn close_ws(
} else { } else {
ConnectionCommand::Error(NetError::try_from(code - 4949).unwrap()) ConnectionCommand::Error(NetError::try_from(code - 4949).unwrap())
}; };
log!("sending to read loop {:?}", cmd); log_info!("sending to read loop {:?}", cmd);
let _ = futures::SinkExt::send(receiver, cmd).await; let _ = futures::SinkExt::send(receiver, cmd).await;
stream stream
@ -162,11 +161,11 @@ async fn ws_loop(
select! { select! {
r = stream.next().fuse() => match r { r = stream.next().fuse() => match r {
Some(Ok(msg)) => { Some(Ok(msg)) => {
//log!("GOT MESSAGE {:?}", msg); //log_info!("GOT MESSAGE {:?}", msg);
if msg.is_close() { if msg.is_close() {
if let Message::Close(Some(cf)) = msg { if let Message::Close(Some(cf)) = msg {
log!("CLOSE from remote with closeframe: {}",cf.reason); log_info!("CLOSE from remote with closeframe: {}",cf.reason);
let last_command = match cf.code { let last_command = match cf.code {
CloseCode::Normal => CloseCode::Normal =>
ConnectionCommand::Close, ConnectionCommand::Close,
@ -185,7 +184,7 @@ async fn ws_loop(
} }
else { else {
let _ = futures::SinkExt::send(receiver, ConnectionCommand::Close).await; let _ = futures::SinkExt::send(receiver, ConnectionCommand::Close).await;
log!("CLOSE from remote"); log_info!("CLOSE from remote");
} }
return Ok(ProtocolError::Closing); return Ok(ProtocolError::Closing);
} else { } else {
@ -193,12 +192,12 @@ async fn ws_loop(
.map_err(|_e| NetError::IoError)?; .map_err(|_e| NetError::IoError)?;
} }
}, },
Some(Err(e)) => {log!("GOT ERROR {:?}",e);return Err(NetError::WsError);}, Some(Err(e)) => {log_info!("GOT ERROR {:?}",e);return Err(NetError::WsError);},
None => break None => break
}, },
s = sender.next().fuse() => match s { s = sender.next().fuse() => match s {
Some(msg) => { Some(msg) => {
//log!("SENDING MESSAGE {:?}", msg); //log_info!("SENDING MESSAGE {:?}", msg);
match msg { match msg {
ConnectionCommand::Msg(m) => { ConnectionCommand::Msg(m) => {
futures::SinkExt::send(&mut stream,Message::binary(serde_bare::to_vec(&m)?)).await.map_err(|_e| NetError::IoError)?; futures::SinkExt::send(&mut stream,Message::binary(serde_bare::to_vec(&m)?)).await.map_err(|_e| NetError::IoError)?;
@ -223,7 +222,7 @@ async fn ws_loop(
match inner_loop(&mut ws, sender, &mut receiver).await { match inner_loop(&mut ws, sender, &mut receiver).await {
Ok(proto_err) => { Ok(proto_err) => {
if proto_err == ProtocolError::Closing { if proto_err == ProtocolError::Closing {
log!("ProtocolError::Closing"); log_info!("ProtocolError::Closing");
let _ = ws.close(None).await; let _ = ws.close(None).await;
} else if proto_err == ProtocolError::NoError { } else if proto_err == ProtocolError::NoError {
close_ws(&mut ws, &mut receiver, 1000, "").await?; close_ws(&mut ws, &mut receiver, 1000, "").await?;
@ -259,7 +258,7 @@ mod test {
use p2p_net::errors::NetError; use p2p_net::errors::NetError;
use p2p_net::types::IP; use p2p_net::types::IP;
use p2p_net::utils::{spawn_and_log_error, ResultSend}; use p2p_net::utils::{spawn_and_log_error, ResultSend};
use p2p_net::{log, sleep}; use p2p_repo::log::*;
use p2p_repo::utils::generate_keypair; use p2p_repo::utils::generate_keypair;
use std::net::IpAddr; use std::net::IpAddr;
use std::str::FromStr; use std::str::FromStr;
@ -280,7 +279,7 @@ mod test {
let (client_priv_key, client_pub_key) = generate_keypair(); let (client_priv_key, client_pub_key) = generate_keypair();
let (user_priv_key, user_pub_key) = generate_keypair(); let (user_priv_key, user_pub_key) = generate_keypair();
log!("start connecting"); log_info!("start connecting");
{ {
let res = BROKER let res = BROKER
.write() .write()
@ -299,7 +298,7 @@ mod test {
}), }),
) )
.await; .await;
log!("broker.connect : {:?}", res); log_info!("broker.connect : {:?}", res);
res.expect("assume the connection succeeds"); res.expect("assume the connection succeeds");
} }
@ -308,7 +307,7 @@ mod test {
async fn timer_close(remote_peer_id: DirectPeerId) -> ResultSend<()> { async fn timer_close(remote_peer_id: DirectPeerId) -> ResultSend<()> {
async move { async move {
sleep!(std::time::Duration::from_secs(3)); sleep!(std::time::Duration::from_secs(3));
log!("timeout"); log_info!("timeout");
BROKER BROKER
.write() .write()
.await .await

@ -15,10 +15,10 @@ use futures::FutureExt;
use futures::{future, pin_mut, select, stream, SinkExt, StreamExt}; use futures::{future, pin_mut, select, stream, SinkExt, StreamExt};
use p2p_net::connection::*; use p2p_net::connection::*;
use p2p_net::errors::*; use p2p_net::errors::*;
use p2p_net::log;
use p2p_net::types::*; use p2p_net::types::*;
use p2p_net::utils::*; use p2p_net::utils::*;
use p2p_net::WS_PORT; use p2p_net::WS_PORT;
use p2p_repo::log::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp}; use p2p_repo::utils::{generate_keypair, now_timestamp};
use std::sync::Arc; use std::sync::Arc;
@ -47,7 +47,7 @@ impl IConnect for ConnectionWebSocket {
let url = format!("ws://{}:{}", ip, WS_PORT); let url = format!("ws://{}:{}", ip, WS_PORT);
let (mut ws, wsio) = WsMeta::connect(url, None).await.map_err(|e| { let (mut ws, wsio) = WsMeta::connect(url, None).await.map_err(|e| {
//log!("{:?}", e); //log_info!("{:?}", e);
NetError::ConnectionError NetError::ConnectionError
})?; })?;
@ -85,7 +85,7 @@ async fn ws_loop(
select! { select! {
r = stream.next().fuse() => match r { r = stream.next().fuse() => match r {
Some(msg) => { Some(msg) => {
log!("GOT MESSAGE {:?}", msg); log_info!("GOT MESSAGE {:?}", msg);
if let WsMessage::Binary(b) = msg { if let WsMessage::Binary(b) = msg {
receiver.send(ConnectionCommand::Msg(serde_bare::from_slice::<ProtocolMessage>(&b)?)).await receiver.send(ConnectionCommand::Msg(serde_bare::from_slice::<ProtocolMessage>(&b)?)).await
.map_err(|_e| NetError::IoError)?; .map_err(|_e| NetError::IoError)?;
@ -98,11 +98,11 @@ async fn ws_loop(
}, },
s = sender.next().fuse() => match s { s = sender.next().fuse() => match s {
Some(msg) => { Some(msg) => {
log!("SENDING MESSAGE {:?}", msg); log_info!("SENDING MESSAGE {:?}", msg);
match msg { match msg {
ConnectionCommand::Msg(m) => { ConnectionCommand::Msg(m) => {
stream.send(WsMessage::Binary(serde_bare::to_vec(&m)?)).await.map_err(|e| { log!("{:?}",e); return NetError::IoError;})?; stream.send(WsMessage::Binary(serde_bare::to_vec(&m)?)).await.map_err(|e| { log_info!("{:?}",e); return NetError::IoError;})?;
}, },
ConnectionCommand::Error(e) => { ConnectionCommand::Error(e) => {
@ -122,7 +122,7 @@ async fn ws_loop(
} }
Ok(ProtocolError::NoError) Ok(ProtocolError::NoError)
} }
log!("START of WS loop"); log_info!("START of WS loop");
let mut events = ws let mut events = ws
.observe(ObserveConfig::default()) .observe(ObserveConfig::default())
//.observe(Filter::Pointer(WsEvent::is_closed).into()) //.observe(Filter::Pointer(WsEvent::is_closed).into())
@ -132,9 +132,9 @@ async fn ws_loop(
Ok(proto_err) => { Ok(proto_err) => {
if proto_err == ProtocolError::NoError { if proto_err == ProtocolError::NoError {
let _ = ws.close_code(1000).await; //.map_err(|_e| NetError::WsError)?; let _ = ws.close_code(1000).await; //.map_err(|_e| NetError::WsError)?;
log!("CLOSED GRACEFULLY"); log_info!("CLOSED GRACEFULLY");
} else { } else {
log!("PROTOCOL ERR"); log_info!("PROTOCOL ERR");
let mut code = proto_err.clone() as u16; let mut code = proto_err.clone() as u16;
if code > 949 { if code > 949 {
code = ProtocolError::OtherError as u16; code = ProtocolError::OtherError as u16;
@ -150,12 +150,12 @@ async fn ws_loop(
.await; .await;
//.map_err(|_e| NetError::WsError)?; //.map_err(|_e| NetError::WsError)?;
//return Err(Box::new(e)); //return Err(Box::new(e));
log!("ERR {:?}", e); log_info!("ERR {:?}", e);
} }
} }
let last_event = events.next().await; let last_event = events.next().await;
log!("WS closed {:?}", last_event.clone()); log_info!("WS closed {:?}", last_event.clone());
let last_command = match last_event { let last_command = match last_event {
None => ConnectionCommand::Close, None => ConnectionCommand::Close,
Some(WsEvent::Open) => ConnectionCommand::Error(NetError::WsError), // this should never happen Some(WsEvent::Open) => ConnectionCommand::Error(NetError::WsError), // this should never happen
@ -185,6 +185,6 @@ async fn ws_loop(
.await .await
.map_err(|_e| NetError::IoError)?; .map_err(|_e| NetError::IoError)?;
log!("END of WS loop"); log_info!("END of WS loop");
Ok(()) Ok(())
} }

@ -8,7 +8,6 @@ description = "P2P network module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs" repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies] [dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" } p2p-repo = { path = "../p2p-repo" }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0" serde_bare = "0.5.0"
@ -26,9 +25,6 @@ noise-protocol = "0.2.0-rc1"
noise-rust-crypto = "0.6.0-rc.1" noise-rust-crypto = "0.6.0-rc.1"
ed25519-dalek = "1.0.1" ed25519-dalek = "1.0.1"
[target.'cfg(target_arch = "wasm32")'.dependencies]
gloo-timers = "0.2.6"
[target.'cfg(target_arch = "wasm32")'.dependencies.getrandom] [target.'cfg(target_arch = "wasm32")'.dependencies.getrandom]
version = "0.2.7" version = "0.2.7"
features = ["js"] features = ["js"]

@ -18,7 +18,7 @@ use std::convert::From;
use std::sync::Arc; use std::sync::Arc;
use crate::utils::{spawn_and_log_error, Receiver, ResultSend, Sender}; use crate::utils::{spawn_and_log_error, Receiver, ResultSend, Sender};
use crate::{connection::*, errors::ProtocolError, log, types::ProtocolMessage}; use crate::{connection::*, errors::ProtocolError, types::ProtocolMessage};
use std::marker::PhantomData; use std::marker::PhantomData;
impl TryFrom<ProtocolMessage> for () { impl TryFrom<ProtocolMessage> for () {

@ -15,7 +15,6 @@ use crate::errors::*;
use crate::types::*; use crate::types::*;
use crate::utils::spawn_and_log_error; use crate::utils::spawn_and_log_error;
use crate::utils::{Receiver, ResultSend, Sender}; use crate::utils::{Receiver, ResultSend, Sender};
use crate::{log, sleep};
use async_std::stream::StreamExt; use async_std::stream::StreamExt;
use async_std::sync::{Arc, RwLock}; use async_std::sync::{Arc, RwLock};
use futures::channel::mpsc; use futures::channel::mpsc;
@ -23,6 +22,7 @@ use futures::SinkExt;
use noise_protocol::U8Array; use noise_protocol::U8Array;
use noise_rust_crypto::sensitive::Sensitive; use noise_rust_crypto::sensitive::Sensitive;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use p2p_repo::log::*;
use p2p_repo::object::Object; use p2p_repo::object::Object;
use p2p_repo::object::ObjectParseError; use p2p_repo::object::ObjectParseError;
use p2p_repo::store::HashMapRepoStore; use p2p_repo::store::HashMapRepoStore;
@ -96,7 +96,7 @@ impl Broker {
// TODO // TODO
let (mut tx, rx) = mpsc::unbounded::<Block>(); let (mut tx, rx) = mpsc::unbounded::<Block>();
//log!("cur {}", std::env::current_dir().unwrap().display()); //log_info!("cur {}", std::env::current_dir().unwrap().display());
//Err(ProtocolError::AccessDenied) //Err(ProtocolError::AccessDenied)
// let f = std::fs::File::open( // let f = std::fs::File::open(
@ -168,10 +168,10 @@ impl Broker {
.unwrap(); .unwrap();
async fn send(mut tx: Sender<Commit>, commit: Commit) -> ResultSend<()> { async fn send(mut tx: Sender<Commit>, commit: Commit) -> ResultSend<()> {
while let Ok(_) = tx.send(commit.clone()).await { while let Ok(_) = tx.send(commit.clone()).await {
log!("sending"); log_info!("sending");
sleep!(std::time::Duration::from_secs(3)); sleep!(std::time::Duration::from_secs(3));
} }
log!("end of sending"); log_info!("end of sending");
Ok(()) Ok(())
} }
spawn_and_log_error(send(tx.clone(), commit)); spawn_and_log_error(send(tx.clone(), commit));
@ -251,7 +251,7 @@ impl Broker {
async fn timer_shutdown(timeout: std::time::Duration) -> ResultSend<()> { async fn timer_shutdown(timeout: std::time::Duration) -> ResultSend<()> {
async move { async move {
sleep!(timeout); sleep!(timeout);
log!("timeout for shutdown"); log_info!("timeout for shutdown");
let _ = BROKER let _ = BROKER
.write() .write()
.await .await
@ -334,8 +334,8 @@ impl Broker {
) -> ResultSend<()> { ) -> ResultSend<()> {
async move { async move {
let res = join.next().await; let res = join.next().await;
log!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id); log_info!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id);
log!("REMOVED"); log_info!("REMOVED");
BROKER.write().await.remove(&remote_peer_id); BROKER.write().await.remove(&remote_peer_id);
} }
.await; .await;
@ -362,7 +362,7 @@ impl Broker {
// TODO check that not already connected to peer // TODO check that not already connected to peer
// IpAddr::from_str("127.0.0.1"); // IpAddr::from_str("127.0.0.1");
log!("CONNECTING"); log_info!("CONNECTING");
let mut connection = cnx let mut connection = cnx
.open( .open(
ip, ip,
@ -405,7 +405,7 @@ impl Broker {
) -> ResultSend<()> { ) -> ResultSend<()> {
async move { async move {
let res = join.next().await; let res = join.next().await;
log!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id); log_info!("SOCKET IS CLOSED {:?} {:?}", res, &remote_peer_id);
if res.is_some() { if res.is_some() {
// we intend to reconnect // we intend to reconnect
let mut broker = BROKER.write().await; let mut broker = BROKER.write().await;
@ -414,10 +414,10 @@ impl Broker {
// let result = broker // let result = broker
// .connect(cnx, ip, core, peer_pubk, peer_privk, remote_peer_id) // .connect(cnx, ip, core, peer_pubk, peer_privk, remote_peer_id)
// .await; // .await;
// log!("SOCKET RECONNECTION {:?} {:?}", result, &remote_peer_id); // log_info!("SOCKET RECONNECTION {:?} {:?}", result, &remote_peer_id);
// TODO: deal with error and incremental backoff // TODO: deal with error and incremental backoff
} else { } else {
log!("REMOVED"); log_info!("REMOVED");
BROKER.write().await.remove(&remote_peer_id); BROKER.write().await.remove(&remote_peer_id);
} }
} }
@ -454,10 +454,10 @@ impl Broker {
pub fn print_status(&self) { pub fn print_status(&self) {
self.peers.iter().for_each(|(peerId, peerInfo)| { self.peers.iter().for_each(|(peerId, peerInfo)| {
log!("PEER in BROKER {:?} {:?}", peerId, peerInfo); log_info!("PEER in BROKER {:?} {:?}", peerId, peerInfo);
}); });
self.direct_connections.iter().for_each(|(ip, directCnx)| { self.direct_connections.iter().for_each(|(ip, directCnx)| {
log!("direct_connection in BROKER {:?} {:?}", ip, directCnx) log_info!("direct_connection in BROKER {:?} {:?}", ip, directCnx)
}); });
} }
} }

@ -2,7 +2,7 @@
* Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers * Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved. * All rights reserved.
* Licensed under the Apache License, Version 2.0 * Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0> * <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, * or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such * at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except * notice may not be copied, modified, or distributed except
@ -12,29 +12,27 @@
//! Connection to a Broker, can be local or remote. //! Connection to a Broker, can be local or remote.
//! If remote, it will use a Stream and Sink of framed messages //! If remote, it will use a Stream and Sink of framed messages
//! This is the trait //! This is the trait
//! //!
use futures::channel::mpsc;
use futures::{ use futures::{
ready, ready, select,
stream::Stream, stream::Stream,
task::{Context, Poll}, task::{Context, Poll},
Future, Future, FutureExt,
select, FutureExt,
}; };
use futures::channel::mpsc;
use std::pin::Pin; use std::pin::Pin;
use std::{collections::HashSet, fmt::Debug}; use std::{collections::HashSet, fmt::Debug};
use crate::errors::*;
use crate::types::*;
use async_broadcast::{broadcast, Receiver}; use async_broadcast::{broadcast, Receiver};
use debug_print::*;
use futures::{pin_mut, stream, Sink, SinkExt, StreamExt}; use futures::{pin_mut, stream, Sink, SinkExt, StreamExt};
use p2p_repo::log::*;
use p2p_repo::object::*; use p2p_repo::object::*;
use p2p_repo::store::*; use p2p_repo::store::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::*; use p2p_repo::utils::*;
use crate::errors::*;
use crate::types::*;
#[async_trait::async_trait] #[async_trait::async_trait]
pub trait BrokerConnection { pub trait BrokerConnection {
@ -88,8 +86,10 @@ pub trait BrokerConnection {
let overlay: OverlayId = match public { let overlay: OverlayId = match public {
true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()), true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
false => { false => {
let key: [u8; blake3::OUT_LEN] = let key: [u8; blake3::OUT_LEN] = blake3::derive_key(
blake3::derive_key("NextGraph OverlayId BLAKE3 key", repo_link.secret().slice()); "NextGraph OverlayId BLAKE3 key",
repo_link.secret().slice(),
);
let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice()); let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
Digest::Blake3Digest32(*keyed_hash.as_bytes()) Digest::Blake3Digest32(*keyed_hash.as_bytes())
} }
@ -105,7 +105,7 @@ pub trait BrokerConnection {
match res { match res {
Err(e) => { Err(e) => {
if e == ProtocolError::OverlayNotJoined { if e == ProtocolError::OverlayNotJoined {
debug_println!("OverlayNotJoined"); log_debug!("OverlayNotJoined");
let res2 = self let res2 = self
.process_overlay_request( .process_overlay_request(
overlay, overlay,
@ -125,7 +125,7 @@ pub trait BrokerConnection {
Ok(()) => {} Ok(()) => {}
} }
debug_println!("OverlayConnectionClient ready"); log_debug!("OverlayConnectionClient ready");
Ok(overlay) Ok(overlay)
} }
} }
@ -143,7 +143,11 @@ impl<'a, T> OverlayConnectionClient<'a, T>
where where
T: BrokerConnection, T: BrokerConnection,
{ {
pub fn create( broker: &'a mut T, overlay: OverlayId, repo_link: RepoLink) -> OverlayConnectionClient<'a, T> { pub fn create(
broker: &'a mut T,
overlay: OverlayId,
repo_link: RepoLink,
) -> OverlayConnectionClient<'a, T> {
OverlayConnectionClient { OverlayConnectionClient {
broker, broker,
repo_link, repo_link,
@ -155,8 +159,10 @@ where
let overlay: OverlayId = match public { let overlay: OverlayId = match public {
true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()), true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
false => { false => {
let key: [u8; blake3::OUT_LEN] = let key: [u8; blake3::OUT_LEN] = blake3::derive_key(
blake3::derive_key("NextGraph OverlayId BLAKE3 key", repo_link.secret().slice()); "NextGraph OverlayId BLAKE3 key",
repo_link.secret().slice(),
);
let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice()); let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
Digest::Blake3Digest32(*keyed_hash.as_bytes()) Digest::Blake3Digest32(*keyed_hash.as_bytes())
} }
@ -299,7 +305,7 @@ where
repo_pubkey, repo_pubkey,
repo_secret, repo_secret,
); );
debug_println!("object has {} blocks", obj.blocks().len()); log_debug!("object has {} blocks", obj.blocks().len());
let mut deduplicated: HashSet<ObjectId> = HashSet::new(); let mut deduplicated: HashSet<ObjectId> = HashSet::new();
for block in obj.blocks() { for block in obj.blocks() {
let id = block.id(); let id = block.id();
@ -334,4 +340,4 @@ where
pub fn get_event_stream(&self) -> &Receiver<Event> { pub fn get_event_stream(&self) -> &Receiver<Event> {
&self.event_stream &self.event_stream
} }
} }

@ -19,17 +19,16 @@ use crate::actor::{Actor, SoS};
use crate::actors::*; use crate::actors::*;
use crate::errors::NetError; use crate::errors::NetError;
use crate::errors::ProtocolError; use crate::errors::ProtocolError;
use crate::log;
use crate::types::*; use crate::types::*;
use crate::utils::*; use crate::utils::*;
use async_std::stream::StreamExt; use async_std::stream::StreamExt;
use async_std::sync::Mutex; use async_std::sync::Mutex;
use debug_print::debug_println;
use futures::{channel::mpsc, select, Future, FutureExt, SinkExt}; use futures::{channel::mpsc, select, Future, FutureExt, SinkExt};
use noise_protocol::U8Array; use noise_protocol::U8Array;
use noise_protocol::{patterns::noise_xk, CipherState, HandshakeState}; use noise_protocol::{patterns::noise_xk, CipherState, HandshakeState};
use noise_rust_crypto::sensitive::Sensitive; use noise_rust_crypto::sensitive::Sensitive;
use noise_rust_crypto::*; use noise_rust_crypto::*;
use p2p_repo::log::*;
use p2p_repo::types::{PrivKey, PubKey}; use p2p_repo::types::{PrivKey, PubKey};
use p2p_repo::utils::{sign, verify}; use p2p_repo::utils::{sign, verify};
use serde_bare::from_slice; use serde_bare::from_slice;
@ -205,7 +204,7 @@ impl NoiseFSM {
} }
pub async fn send(&mut self, msg: ProtocolMessage) -> Result<(), ProtocolError> { pub async fn send(&mut self, msg: ProtocolMessage) -> Result<(), ProtocolError> {
log!("SENDING: {:?}", msg); log_info!("SENDING: {:?}", msg);
if self.noise_cipher_state_enc.is_some() { if self.noise_cipher_state_enc.is_some() {
let cipher = self.encrypt(msg)?; let cipher = self.encrypt(msg)?;
self.sender self.sender
@ -251,7 +250,7 @@ impl NoiseFSM {
} }
} }
if msg_opt.is_some() { if msg_opt.is_some() {
log!("RECEIVED: {:?}", msg_opt.as_ref().unwrap()); log_info!("RECEIVED: {:?}", msg_opt.as_ref().unwrap());
} }
match self.state { match self.state {
// TODO verify that ID is zero // TODO verify that ID is zero
@ -314,12 +313,12 @@ impl NoiseFSM {
let mut payload = let mut payload =
handshake.read_message_vec(noise.data()).map_err(|e| { handshake.read_message_vec(noise.data()).map_err(|e| {
debug_println!("{:?}", e); log_debug!("{:?}", e);
ProtocolError::NoiseHandshakeFailed ProtocolError::NoiseHandshakeFailed
})?; })?;
payload = handshake.write_message_vec(&payload).map_err(|e| { payload = handshake.write_message_vec(&payload).map_err(|e| {
debug_println!("{:?}", e); log_debug!("{:?}", e);
ProtocolError::NoiseHandshakeFailed ProtocolError::NoiseHandshakeFailed
})?; })?;
@ -347,7 +346,7 @@ impl NoiseFSM {
.map_err(|e| ProtocolError::NoiseHandshakeFailed)?; .map_err(|e| ProtocolError::NoiseHandshakeFailed)?;
payload = handshake.write_message_vec(&payload).map_err(|e| { payload = handshake.write_message_vec(&payload).map_err(|e| {
debug_println!("{:?}", e); log_debug!("{:?}", e);
ProtocolError::NoiseHandshakeFailed ProtocolError::NoiseHandshakeFailed
})?; })?;
@ -489,7 +488,7 @@ impl NoiseFSM {
if (result.is_err()) { if (result.is_err()) {
return Err(result); return Err(result);
} }
log!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the server side"); log_info!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the server side");
self.state = FSMstate::AuthResult; self.state = FSMstate::AuthResult;
return Ok(StepReply::NONE); return Ok(StepReply::NONE);
} }
@ -509,7 +508,7 @@ impl NoiseFSM {
self.state = FSMstate::AuthResult; self.state = FSMstate::AuthResult;
log!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the client side"); log_info!("AUTHENTICATION SUCCESSFUL ! waiting for requests on the client side");
return Ok(StepReply::NONE); return Ok(StepReply::NONE);
} }
@ -612,7 +611,7 @@ impl ConnectionBase {
ConnectionCommand::Close ConnectionCommand::Close
| ConnectionCommand::Error(_) | ConnectionCommand::Error(_)
| ConnectionCommand::ProtocolError(_) => { | ConnectionCommand::ProtocolError(_) => {
log!("EXIT READ LOOP because : {:?}", msg); log_info!("EXIT READ LOOP because : {:?}", msg);
break; break;
} }
ConnectionCommand::Msg(proto_msg) => { ConnectionCommand::Msg(proto_msg) => {
@ -677,7 +676,7 @@ impl ConnectionBase {
} }
} }
} }
log!("END OF READ LOOP"); log_info!("END OF READ LOOP");
Ok(()) Ok(())
} }
@ -720,7 +719,7 @@ impl ConnectionBase {
// } // }
pub async fn close(&mut self) { pub async fn close(&mut self) {
log!("closing..."); log_info!("closing...");
self.send(ConnectionCommand::Close).await; self.send(ConnectionCommand::Close).await;
} }
@ -772,8 +771,8 @@ mod test {
use crate::actor::*; use crate::actor::*;
use crate::actors::*; use crate::actors::*;
use crate::log;
use crate::types::*; use crate::types::*;
use p2p_repo::log::*;
use std::any::{Any, TypeId}; use std::any::{Any, TypeId};
#[async_std::test] #[async_std::test]
@ -781,14 +780,14 @@ mod test {
#[async_std::test] #[async_std::test]
pub async fn test_typeid() { pub async fn test_typeid() {
log!( log_info!(
"{:?}", "{:?}",
ClientHello::Noise3(Noise::V0(NoiseV0 { data: vec![] })).type_id() ClientHello::Noise3(Noise::V0(NoiseV0 { data: vec![] })).type_id()
); );
let a = Noise::V0(NoiseV0 { data: [].to_vec() }); let a = Noise::V0(NoiseV0 { data: [].to_vec() });
log!("{:?}", a.type_id()); log_info!("{:?}", a.type_id());
log!("{:?}", TypeId::of::<Noise>()); log_info!("{:?}", TypeId::of::<Noise>());
log!("{:?}", ClientHello::Local.type_id()); log_info!("{:?}", ClientHello::Local.type_id());
log!("{:?}", TypeId::of::<ClientHello>()); log_info!("{:?}", TypeId::of::<ClientHello>());
} }
} }

@ -8,6 +8,8 @@
* notice may not be copied, modified, or distributed except * notice may not be copied, modified, or distributed except
* according to those terms. * according to those terms.
*/ */
#[macro_use]
extern crate p2p_repo;
pub mod types; pub mod types;
@ -28,51 +30,3 @@ pub mod utils;
pub mod tests; pub mod tests;
pub static WS_PORT: u16 = 1025; pub static WS_PORT: u16 = 1025;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
// The `console.log` is quite polymorphic, so we can bind it with multiple
// signatures. Note that we need to use `js_name` to ensure we always call
// `log` in JS.
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_u32(a: u32);
// Multiple arguments too!
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_many(a: &str, b: &str);
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log {
// Note that this is using the `log` function imported above during
// `bare_bones`
($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
}
#[cfg(not(target_arch = "wasm32"))]
#[macro_export]
macro_rules! log {
($($t:tt)*) => (debug_print::debug_println!($($t)*))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (gloo_timers::future::sleep($($t)*).await)
}
#[cfg(not(target_arch = "wasm32"))]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (std::thread::sleep($($t)*))
}

@ -23,6 +23,37 @@ use crate::{actor::EActor, actors::*, errors::ProtocolError};
use p2p_repo::types::*; use p2p_repo::types::*;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
//
// Broker common types
//
/// Bind address
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BindAddress {
pub port: u16,
pub ip: IP,
}
/// BrokerServerTypeV0 type
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum BrokerServerTypeV0 {
Localhost(u16), // optional port number
BoxPrivate(Vec<BindAddress>),
BoxPublic(Vec<BindAddress>),
BoxPublicDyn(Vec<BindAddress>), // can be empty
Domain(String), // accepts an option trailing ":port" number
}
/// BrokerServer details Version 0
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BrokerServerV0 {
/// Network addresses
pub server_type: BrokerServerTypeV0,
/// peerId of the server
pub peer_id: PubKey,
}
// //
// COMMON TYPES FOR MESSAGES // COMMON TYPES FOR MESSAGES
// //

@ -9,13 +9,14 @@
* according to those terms. * according to those terms.
*/ */
use crate::log;
use async_std::task; use async_std::task;
use ed25519_dalek::*; use ed25519_dalek::*;
use futures::{channel::mpsc, select, Future, FutureExt, SinkExt}; use futures::{channel::mpsc, select, Future, FutureExt, SinkExt};
pub use noise_protocol::U8Array; pub use noise_protocol::U8Array;
use noise_protocol::DH; use noise_protocol::DH;
pub use noise_rust_crypto::sensitive::Sensitive; pub use noise_rust_crypto::sensitive::Sensitive;
use p2p_repo::log::*;
use p2p_repo::types::PubKey;
#[cfg(target_arch = "wasm32")] #[cfg(target_arch = "wasm32")]
pub fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()> pub fn spawn_and_log_error<F>(fut: F) -> task::JoinHandle<()>
@ -24,7 +25,7 @@ where
{ {
task::spawn_local(async move { task::spawn_local(async move {
if let Err(e) = fut.await { if let Err(e) = fut.await {
log!("EXCEPTION {}", e) log_err!("EXCEPTION {}", e)
} }
}) })
} }
@ -41,7 +42,7 @@ where
{ {
task::spawn(async move { task::spawn(async move {
if let Err(e) = fut.await { if let Err(e) = fut.await {
eprintln!("{}", e) log_err!("{}", e)
} }
}) })
} }
@ -49,6 +50,16 @@ where
pub type Sender<T> = mpsc::UnboundedSender<T>; pub type Sender<T> = mpsc::UnboundedSender<T>;
pub type Receiver<T> = mpsc::UnboundedReceiver<T>; pub type Receiver<T> = mpsc::UnboundedReceiver<T>;
pub fn keys_from_bytes(secret_key: [u8; 32]) -> (Sensitive<[u8; 32]>, PubKey) {
let sk = SecretKey::from_bytes(&secret_key).unwrap();
let pk: PublicKey = (&sk).into();
let pub_key = PubKey::Ed25519PubKey(pk.to_bytes());
let priv_key = Sensitive::<[u8; 32]>::from_slice(&secret_key);
(priv_key, pub_key)
}
pub fn gen_keys() -> (Sensitive<[u8; 32]>, [u8; 32]) { pub fn gen_keys() -> (Sensitive<[u8; 32]>, [u8; 32]) {
let pri = noise_rust_crypto::X25519::genkey(); let pri = noise_rust_crypto::X25519::genkey();
let publ = noise_rust_crypto::X25519::pubkey(&pri); let publ = noise_rust_crypto::X25519::pubkey(&pri);

@ -7,6 +7,10 @@ authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P repository module of NextGraph" description = "P2P repository module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs" repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[features]
server_log_output = []
[dependencies] [dependencies]
blake3 = "1.3.1" blake3 = "1.3.1"
chacha20 = "0.9.0" chacha20 = "0.9.0"
@ -16,8 +20,15 @@ serde = { version = "1.0.142", features = ["derive"] }
serde_bare = "0.5.0" serde_bare = "0.5.0"
serde_bytes = "0.11.7" serde_bytes = "0.11.7"
fastbloom-rs = "0.5.3" fastbloom-rs = "0.5.3"
debug_print = "1.0.0"
hex = "0.4.3" hex = "0.4.3"
futures = "0.3.24" futures = "0.3.24"
base64-url = "2.0.0" base64-url = "2.0.0"
web-time = "0.2.0" web-time = "0.2.0"
wasm-bindgen = "0.2"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
debug_print = "1.0.0"
log = "0.4"
[target.'cfg(target_arch = "wasm32")'.dependencies]
gloo-timers = "0.2.6"

@ -3,7 +3,7 @@
// This code is partly derived from work written by TG x Thoth from P2Pcollab. // This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth // Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0 // Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0> // <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, // or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such // at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except // notice may not be copied, modified, or distributed except
@ -11,7 +11,7 @@
//! Branch of a Repository //! Branch of a Repository
use debug_print::*; use crate::log::*;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use fastbloom_rs::{BloomFilter as Filter, Membership}; use fastbloom_rs::{BloomFilter as Filter, Membership};
@ -113,9 +113,9 @@ impl Branch {
their_filter: &BloomFilter, their_filter: &BloomFilter,
store: &impl RepoStore, store: &impl RepoStore,
) -> Result<Vec<ObjectId>, ObjectParseError> { ) -> Result<Vec<ObjectId>, ObjectParseError> {
//debug_println!(">> sync_req"); //log_debug!(">> sync_req");
//debug_println!(" our_heads: {:?}", our_heads); //log_debug!(" our_heads: {:?}", our_heads);
//debug_println!(" their_heads: {:?}", their_heads); //log_debug!(" their_heads: {:?}", their_heads);
/// Load `Commit` `Object`s of a `Branch` from the `RepoStore` starting from the given `Object`, /// Load `Commit` `Object`s of a `Branch` from the `RepoStore` starting from the given `Object`,
/// and collect `ObjectId`s starting from `our_heads` towards `their_heads` /// and collect `ObjectId`s starting from `our_heads` towards `their_heads`
@ -126,12 +126,12 @@ impl Branch {
visited: &mut HashSet<ObjectId>, visited: &mut HashSet<ObjectId>,
missing: &mut HashSet<ObjectId>, missing: &mut HashSet<ObjectId>,
) -> Result<bool, ObjectParseError> { ) -> Result<bool, ObjectParseError> {
//debug_println!(">>> load_branch: {}", cobj.id()); //log_debug!(">>> load_branch: {}", cobj.id());
let id = cobj.id(); let id = cobj.id();
// root has no deps // root has no deps
let is_root = cobj.is_root(); let is_root = cobj.is_root();
//debug_println!(" deps: {:?}", cobj.deps()); //log_debug!(" deps: {:?}", cobj.deps());
// check if this commit object is present in their_heads // check if this commit object is present in their_heads
let mut their_head_found = their_heads.contains(&id); let mut their_head_found = their_heads.contains(&id);
@ -172,7 +172,7 @@ impl Branch {
let mut visited = HashSet::new(); let mut visited = HashSet::new();
let their_head_found = let their_head_found =
load_branch(&cobj, store, their_heads, &mut visited, &mut missing)?; load_branch(&cobj, store, their_heads, &mut visited, &mut missing)?;
//debug_println!("<<< load_branch: {}", their_head_found); //log_debug!("<<< load_branch: {}", their_head_found);
ours.extend(visited); // add if one of their_heads found ours.extend(visited); // add if one of their_heads found
} }
@ -181,15 +181,15 @@ impl Branch {
let cobj = Object::load(*id, None, store)?; let cobj = Object::load(*id, None, store)?;
let mut visited = HashSet::new(); let mut visited = HashSet::new();
let their_head_found = load_branch(&cobj, store, &[], &mut visited, &mut missing)?; let their_head_found = load_branch(&cobj, store, &[], &mut visited, &mut missing)?;
//debug_println!("<<< load_branch: {}", their_head_found); //log_debug!("<<< load_branch: {}", their_head_found);
theirs.extend(visited); // add if one of their_heads found theirs.extend(visited); // add if one of their_heads found
} }
let mut result = &ours - &theirs; let mut result = &ours - &theirs;
//debug_println!("!! ours: {:?}", ours); //log_debug!("!! ours: {:?}", ours);
//debug_println!("!! theirs: {:?}", theirs); //log_debug!("!! theirs: {:?}", theirs);
//debug_println!("!! result: {:?}", result); //log_debug!("!! result: {:?}", result);
// remove their_commits from result // remove their_commits from result
let filter = Filter::from_u8_array(their_filter.f.as_slice(), their_filter.k.into()); let filter = Filter::from_u8_array(their_filter.f.as_slice(), their_filter.k.into());
@ -202,7 +202,7 @@ impl Branch {
} }
} }
} }
//debug_println!("!! result filtered: {:?}", result); //log_debug!("!! result filtered: {:?}", result);
Ok(Vec::from_iter(result)) Ok(Vec::from_iter(result))
} }
} }
@ -239,9 +239,9 @@ mod test {
repo_pubkey, repo_pubkey,
repo_secret, repo_secret,
); );
println!(">>> add_obj"); log_debug!(">>> add_obj");
println!(" id: {:?}", obj.id()); log_debug!(" id: {:?}", obj.id());
println!(" deps: {:?}", obj.deps()); log_debug!(" deps: {:?}", obj.deps());
obj.save(store).unwrap(); obj.save(store).unwrap();
obj.reference().unwrap() obj.reference().unwrap()
} }
@ -283,7 +283,7 @@ mod test {
expiry, expiry,
) )
.unwrap(); .unwrap();
//println!("commit: {:?}", commit); //log_debug!("commit: {:?}", commit);
add_obj( add_obj(
ObjectContent::Commit(commit), ObjectContent::Commit(commit),
obj_deps, obj_deps,
@ -303,7 +303,7 @@ mod test {
let deps = vec![]; let deps = vec![];
let expiry = None; let expiry = None;
let body = CommitBody::Branch(branch); let body = CommitBody::Branch(branch);
//println!("body: {:?}", body); //log_debug!("body: {:?}", body);
add_obj( add_obj(
ObjectContent::CommitBody(body), ObjectContent::CommitBody(body),
deps, deps,
@ -323,7 +323,7 @@ mod test {
let expiry = None; let expiry = None;
let content = [7u8; 777].to_vec(); let content = [7u8; 777].to_vec();
let body = CommitBody::Transaction(Transaction::V0(content)); let body = CommitBody::Transaction(Transaction::V0(content));
//println!("body: {:?}", body); //log_debug!("body: {:?}", body);
add_obj( add_obj(
ObjectContent::CommitBody(body), ObjectContent::CommitBody(body),
deps, deps,
@ -342,7 +342,7 @@ mod test {
) -> ObjectRef { ) -> ObjectRef {
let expiry = None; let expiry = None;
let body = CommitBody::Ack(Ack::V0()); let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body); //log_debug!("body: {:?}", body);
add_obj( add_obj(
ObjectContent::CommitBody(body), ObjectContent::CommitBody(body),
deps, deps,
@ -359,12 +359,12 @@ mod test {
// repo // repo
let repo_keypair: Keypair = Keypair::generate(&mut rng); let repo_keypair: Keypair = Keypair::generate(&mut rng);
println!( log_debug!(
"repo private key: ({}) {:?}", "repo private key: ({}) {:?}",
repo_keypair.secret.as_bytes().len(), repo_keypair.secret.as_bytes().len(),
repo_keypair.secret.as_bytes() repo_keypair.secret.as_bytes()
); );
println!( log_debug!(
"repo public key: ({}) {:?}", "repo public key: ({}) {:?}",
repo_keypair.public.as_bytes().len(), repo_keypair.public.as_bytes().len(),
repo_keypair.public.as_bytes() repo_keypair.public.as_bytes()
@ -376,11 +376,11 @@ mod test {
// branch // branch
let branch_keypair: Keypair = Keypair::generate(&mut rng); let branch_keypair: Keypair = Keypair::generate(&mut rng);
println!("branch public key: {:?}", branch_keypair.public.as_bytes()); log_debug!("branch public key: {:?}", branch_keypair.public.as_bytes());
let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes()); let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes());
let member_keypair: Keypair = Keypair::generate(&mut rng); let member_keypair: Keypair = Keypair::generate(&mut rng);
println!("member public key: {:?}", member_keypair.public.as_bytes()); log_debug!("member public key: {:?}", member_keypair.public.as_bytes());
let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes()); let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes());
let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes()); let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes());
@ -404,19 +404,19 @@ mod test {
tags, tags,
metadata, metadata,
); );
//println!("branch: {:?}", branch); //log_debug!("branch: {:?}", branch);
fn print_branch() { fn print_branch() {
println!("branch deps/acks:"); log_debug!("branch deps/acks:");
println!(""); log_debug!("");
println!(" br"); log_debug!(" br");
println!(" / \\"); log_debug!(" / \\");
println!(" t1 t2"); log_debug!(" t1 t2");
println!(" / \\ / \\"); log_debug!(" / \\ / \\");
println!(" a3 t4<--t5-->(t1)"); log_debug!(" a3 t4<--t5-->(t1)");
println!(" / \\"); log_debug!(" / \\");
println!(" a6 a7"); log_debug!(" a6 a7");
println!(""); log_debug!("");
} }
print_branch(); print_branch();
@ -434,7 +434,7 @@ mod test {
// create & add commits to store // create & add commits to store
println!(">> br"); log_debug!(">> br");
let br = add_commit( let br = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -448,7 +448,7 @@ mod test {
&mut store, &mut store,
); );
println!(">> t1"); log_debug!(">> t1");
let t1 = add_commit( let t1 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -462,7 +462,7 @@ mod test {
&mut store, &mut store,
); );
println!(">> t2"); log_debug!(">> t2");
let t2 = add_commit( let t2 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -476,7 +476,7 @@ mod test {
&mut store, &mut store,
); );
println!(">> a3"); log_debug!(">> a3");
let a3 = add_commit( let a3 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -490,7 +490,7 @@ mod test {
&mut store, &mut store,
); );
println!(">> t4"); log_debug!(">> t4");
let t4 = add_commit( let t4 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -504,7 +504,7 @@ mod test {
&mut store, &mut store,
); );
println!(">> t5"); log_debug!(">> t5");
let t5 = add_commit( let t5 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -518,7 +518,7 @@ mod test {
&mut store, &mut store,
); );
println!(">> a6"); log_debug!(">> a6");
let a6 = add_commit( let a6 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -532,7 +532,7 @@ mod test {
&mut store, &mut store,
); );
println!(">> a7"); log_debug!(">> a7");
let a7 = add_commit( let a7 = add_commit(
branch_body, branch_body,
member_privkey, member_privkey,
@ -562,10 +562,10 @@ mod test {
}; };
print_branch(); print_branch();
println!(">> sync_req"); log_debug!(">> sync_req");
println!(" our_heads: [a3, t5, a6, a7]"); log_debug!(" our_heads: [a3, t5, a6, a7]");
println!(" their_heads: [a3, t5]"); log_debug!(" their_heads: [a3, t5]");
println!(" their_commits: [br, t1, t2, a3, t5, a6]"); log_debug!(" their_commits: [br, t1, t2, a3, t5, a6]");
let ids = Branch::sync_req( let ids = Branch::sync_req(
&[a3.id, t5.id, a6.id, a7.id], &[a3.id, t5.id, a6.id, a7.id],

@ -3,7 +3,7 @@
// This code is partly derived from work written by TG x Thoth from P2Pcollab. // This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth // Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0 // Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0> // <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, // or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such // at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except // notice may not be copied, modified, or distributed except
@ -11,12 +11,12 @@
//! Commit //! Commit
use debug_print::*;
use ed25519_dalek::*; use ed25519_dalek::*;
use std::collections::HashSet; use std::collections::HashSet;
use std::iter::FromIterator; use std::iter::FromIterator;
use crate::log::*;
use crate::object::*; use crate::object::*;
use crate::store::*; use crate::store::*;
use crate::types::*; use crate::types::*;
@ -271,7 +271,7 @@ impl Commit {
/// Verify if the commit's `body` and dependencies (`deps` & `acks`) are available in the `store` /// Verify if the commit's `body` and dependencies (`deps` & `acks`) are available in the `store`
pub fn verify_deps(&self, store: &impl RepoStore) -> Result<Vec<ObjectId>, CommitLoadError> { pub fn verify_deps(&self, store: &impl RepoStore) -> Result<Vec<ObjectId>, CommitLoadError> {
//debug_println!(">> verify_deps: #{}", self.seq()); //log_debug!(">> verify_deps: #{}", self.seq());
/// Load `Commit`s of a `Branch` from the `RepoStore` starting from the given `Commit`, /// Load `Commit`s of a `Branch` from the `RepoStore` starting from the given `Commit`,
/// and collect missing `ObjectId`s /// and collect missing `ObjectId`s
fn load_branch( fn load_branch(
@ -280,7 +280,7 @@ impl Commit {
visited: &mut HashSet<ObjectId>, visited: &mut HashSet<ObjectId>,
missing: &mut HashSet<ObjectId>, missing: &mut HashSet<ObjectId>,
) -> Result<(), CommitLoadError> { ) -> Result<(), CommitLoadError> {
//debug_println!(">>> load_branch: #{}", commit.seq()); //log_debug!(">>> load_branch: #{}", commit.seq());
// the commit verify_deps() was called on may not have an ID set, // the commit verify_deps() was called on may not have an ID set,
// but the commits loaded from store should have it // but the commits loaded from store should have it
match commit.id() { match commit.id() {
@ -302,7 +302,7 @@ impl Commit {
} }
Err(e) => return Err(e), Err(e) => return Err(e),
}; };
debug_println!("!!! is_root: {}", is_root); log_debug!("!!! is_root: {}", is_root);
// load deps // load deps
if !is_root { if !is_root {
@ -360,12 +360,12 @@ mod test {
pub fn test_commit() { pub fn test_commit() {
let mut csprng = OsRng {}; let mut csprng = OsRng {};
let keypair: Keypair = Keypair::generate(&mut csprng); let keypair: Keypair = Keypair::generate(&mut csprng);
println!( log_debug!(
"private key: ({}) {:?}", "private key: ({}) {:?}",
keypair.secret.as_bytes().len(), keypair.secret.as_bytes().len(),
keypair.secret.as_bytes() keypair.secret.as_bytes()
); );
println!( log_debug!(
"public key: ({}) {:?}", "public key: ({}) {:?}",
keypair.public.as_bytes().len(), keypair.public.as_bytes().len(),
keypair.public.as_bytes() keypair.public.as_bytes()
@ -392,7 +392,7 @@ mod test {
priv_key, pub_key, seq, branch, deps, acks, refs, metadata, body_ref, expiry, priv_key, pub_key, seq, branch, deps, acks, refs, metadata, body_ref, expiry,
) )
.unwrap(); .unwrap();
println!("commit: {:?}", commit); log_debug!("commit: {:?}", commit);
let store = HashMapRepoStore::new(); let store = HashMapRepoStore::new();
let metadata = [66u8; 64].to_vec(); let metadata = [66u8; 64].to_vec();
@ -415,9 +415,9 @@ mod test {
tags, tags,
metadata, metadata,
); );
//println!("branch: {:?}", branch); //log_debug!("branch: {:?}", branch);
let body = CommitBody::Ack(Ack::V0()); let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body); //log_debug!("body: {:?}", body);
match commit.load_body(&store) { match commit.load_body(&store) {
Ok(_b) => panic!("Body should not exist"), Ok(_b) => panic!("Body should not exist"),
@ -428,7 +428,7 @@ mod test {
} }
let content = commit.content(); let content = commit.content();
println!("content: {:?}", content); log_debug!("content: {:?}", content);
commit.verify_sig().expect("Invalid signature"); commit.verify_sig().expect("Invalid signature");
commit commit

@ -19,3 +19,162 @@ pub mod errors;
pub mod kcv_store; pub mod kcv_store;
pub mod site; pub mod site;
pub mod log {
#[cfg(not(target_arch = "wasm32"))]
pub use debug_print::debug_println;
#[cfg(target_arch = "wasm32")]
pub use gloo_timers;
#[cfg(not(target_arch = "wasm32"))]
pub use log;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::prelude::*;
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
#[wasm_bindgen(js_namespace = console)]
pub fn warn(s: &str);
#[wasm_bindgen(js_namespace = console)]
pub fn error(s: &str);
// The `console.log` is quite polymorphic, so we can bind it with multiple
// signatures. Note that we need to use `js_name` to ensure we always call
// `log` in JS.
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_u32(a: u32);
// Multiple arguments too!
#[wasm_bindgen(js_namespace = console, js_name = log)]
fn log_many(a: &str, b: &str);
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_info {
($($t:tt)*) => (println!("INFO:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_err {
($($t:tt)*) => (println!("ERR:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_warn {
($($t:tt)*) => (println!("WARN:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => (debug_println!("DEBUG:{}",format!($($t)*)))
}
#[cfg(all(not(feature = "server_log_output"), not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => (debug_println!("TRACE:{}",format!($($t)*)))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_info {
($($t:tt)*) => (log::info!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_err {
($($t:tt)*) => (log::error!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_warn {
($($t:tt)*) => (log::warn!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => (log::debug!($($t)*))
}
#[cfg(all(feature = "server_log_output", not(target_arch = "wasm32")))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => (log::trace!($($t)*))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log_info {
($($t:tt)*) => (log(&format_args!($($t)*).to_string()))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log_err {
($($t:tt)*) => (error(&format_args!($($t)*).to_string()))
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! log_warn {
($($t:tt)*) => (warn(&format_args!($($t)*).to_string()))
}
#[cfg(all(debug_assertions, target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => (log(format!("DEBUG:{}",&format_args!($($t)*).to_string())))
}
#[cfg(all(debug_assertions, target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => (log(format!("TRACE:{}",&format_args!($($t)*).to_string())))
}
#[cfg(all(not(debug_assertions), target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_debug {
($($t:tt)*) => {};
}
#[cfg(all(not(debug_assertions), target_arch = "wasm32"))]
#[macro_export]
macro_rules! log_trace {
($($t:tt)*) => {};
}
#[cfg(target_arch = "wasm32")]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (gloo_timers::future::sleep($($t)*).await)
}
#[cfg(not(target_arch = "wasm32"))]
#[macro_export]
macro_rules! sleep {
($($t:tt)*) => (std::thread::sleep($($t)*))
}
pub use log_debug;
pub use log_err;
pub use log_info;
pub use log_trace;
pub use log_warn;
pub use sleep;
}

@ -13,11 +13,10 @@
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use debug_print::*;
use chacha20::cipher::{KeyIvInit, StreamCipher}; use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::ChaCha20; use chacha20::ChaCha20;
use crate::log::*;
use crate::store::*; use crate::store::*;
use crate::types::*; use crate::types::*;
@ -101,9 +100,9 @@ impl Object {
cipher.apply_keystream(&mut content_enc_slice); cipher.apply_keystream(&mut content_enc_slice);
let key = SymKey::ChaCha20Key(key.clone()); let key = SymKey::ChaCha20Key(key.clone());
let block = Block::new(children, deps, expiry, content_enc, Some(key)); let block = Block::new(children, deps, expiry, content_enc, Some(key));
//debug_println!(">>> make_block:"); //log_debug!(">>> make_block:");
//debug_println!("!! id: {:?}", obj.id()); //log_debug!("!! id: {:?}", obj.id());
//debug_println!("!! children: ({}) {:?}", children.len(), children); //log_debug!("!! children: ({}) {:?}", children.len(), children);
block block
} }
@ -163,7 +162,7 @@ impl Object {
expiry, expiry,
)); ));
} }
//debug_println!("parents += {}", parents.len()); //log_debug!("parents += {}", parents.len());
if 1 < parents.len() { if 1 < parents.len() {
let mut great_parents = let mut great_parents =
@ -194,7 +193,7 @@ impl Object {
) -> Object { ) -> Object {
// create blocks by chunking + encrypting content // create blocks by chunking + encrypting content
let valid_block_size = store_valid_value_size(block_size); let valid_block_size = store_valid_value_size(block_size);
println!("valid_block_size {}", valid_block_size); log_debug!("valid_block_size {}", valid_block_size);
let data_chunk_size = valid_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA; let data_chunk_size = valid_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA;
let mut blocks: Vec<Block> = vec![]; let mut blocks: Vec<Block> = vec![];
@ -418,7 +417,7 @@ impl Object {
leaves: &mut Option<&mut Vec<Block>>, leaves: &mut Option<&mut Vec<Block>>,
obj_content: &mut Option<&mut Vec<u8>>, obj_content: &mut Option<&mut Vec<u8>>,
) -> Result<(), ObjectParseError> { ) -> Result<(), ObjectParseError> {
/*debug_println!( /*log_debug!(
">>> collect_leaves: #{}..{}", ">>> collect_leaves: #{}..{}",
parent_index, parent_index,
parent_index + parents.len() - 1 parent_index + parents.len() - 1
@ -427,13 +426,13 @@ impl Object {
let mut i = parent_index; let mut i = parent_index;
for (id, key) in parents { for (id, key) in parents {
//debug_println!("!!! parent: #{}", i); //log_debug!("!!! parent: #{}", i);
let block = &blocks[i]; let block = &blocks[i];
i += 1; i += 1;
// verify object ID // verify object ID
if *id != block.id() { if *id != block.id() {
debug_println!("Invalid ObjectId.\nExp: {:?}\nGot: {:?}", *id, block.id()); log_debug!("Invalid ObjectId.\nExp: {:?}\nGot: {:?}", *id, block.id());
return Err(ObjectParseError::InvalidBlockId); return Err(ObjectParseError::InvalidBlockId);
} }
@ -455,7 +454,7 @@ impl Object {
match serde_bare::from_slice(content_dec.as_slice()) { match serde_bare::from_slice(content_dec.as_slice()) {
Ok(c) => content = c, Ok(c) => content = c,
Err(e) => { Err(e) => {
debug_println!("Block deserialize error: {}", e); log_debug!("Block deserialize error: {}", e);
return Err(ObjectParseError::BlockDeserializeError); return Err(ObjectParseError::BlockDeserializeError);
} }
} }
@ -464,13 +463,13 @@ impl Object {
match content { match content {
BlockContentV0::InternalNode(keys) => { BlockContentV0::InternalNode(keys) => {
if keys.len() != b.children.len() { if keys.len() != b.children.len() {
debug_println!( log_debug!(
"Invalid keys length: got {}, expected {}", "Invalid keys length: got {}, expected {}",
keys.len(), keys.len(),
b.children.len() b.children.len()
); );
debug_println!("!!! children: {:?}", b.children); log_debug!("!!! children: {:?}", b.children);
debug_println!("!!! keys: {:?}", keys); log_debug!("!!! keys: {:?}", keys);
return Err(ObjectParseError::InvalidKeys); return Err(ObjectParseError::InvalidKeys);
} }
@ -547,7 +546,7 @@ impl Object {
match serde_bare::from_slice(obj_content.as_slice()) { match serde_bare::from_slice(obj_content.as_slice()) {
Ok(c) => Ok(c), Ok(c) => Ok(c),
Err(e) => { Err(e) => {
debug_println!("Object deserialize error: {}", e); log_debug!("Object deserialize error: {}", e);
Err(ObjectParseError::ObjectDeserializeError) Err(ObjectParseError::ObjectDeserializeError)
} }
} }
@ -609,18 +608,18 @@ mod test {
repo_secret, repo_secret,
); );
println!("obj.id: {:?}", obj.id()); log_debug!("obj.id: {:?}", obj.id());
println!("obj.key: {:?}", obj.key()); log_debug!("obj.key: {:?}", obj.key());
println!("obj.blocks.len: {:?}", obj.blocks().len()); log_debug!("obj.blocks.len: {:?}", obj.blocks().len());
let mut i = 0; let mut i = 0;
for node in obj.blocks() { for node in obj.blocks() {
println!("#{}: {:?}", i, node.id()); log_debug!("#{}: {:?}", i, node.id());
let mut file = std::fs::File::create(format!("tests/{}.ng", node.id())) let mut file = std::fs::File::create(format!("tests/{}.ng", node.id()))
.expect("open block write file"); .expect("open block write file");
let ser_file = serde_bare::to_vec(node).unwrap(); let ser_file = serde_bare::to_vec(node).unwrap();
file.write_all(&ser_file); file.write_all(&ser_file);
println!("{:?}", ser_file); log_debug!("{:?}", ser_file);
i += 1; i += 1;
} }
@ -652,14 +651,14 @@ mod test {
repo_secret, repo_secret,
); );
println!("obj.id: {:?}", obj.id()); log_debug!("obj.id: {:?}", obj.id());
println!("obj.key: {:?}", obj.key()); log_debug!("obj.key: {:?}", obj.key());
println!("obj.deps: {:?}", obj.deps()); log_debug!("obj.deps: {:?}", obj.deps());
println!("obj.blocks.len: {:?}", obj.blocks().len()); log_debug!("obj.blocks.len: {:?}", obj.blocks().len());
let mut i = 0; let mut i = 0;
for node in obj.blocks() { for node in obj.blocks() {
println!("#{}: {:?}", i, node.id()); log_debug!("#{}: {:?}", i, node.id());
i += 1; i += 1;
} }
@ -677,13 +676,13 @@ mod test {
let obj2 = Object::load(obj.id(), obj.key(), &store).unwrap(); let obj2 = Object::load(obj.id(), obj.key(), &store).unwrap();
println!("obj2.id: {:?}", obj2.id()); log_debug!("obj2.id: {:?}", obj2.id());
println!("obj2.key: {:?}", obj2.key()); log_debug!("obj2.key: {:?}", obj2.key());
println!("obj2.deps: {:?}", obj2.deps()); log_debug!("obj2.deps: {:?}", obj2.deps());
println!("obj2.blocks.len: {:?}", obj2.blocks().len()); log_debug!("obj2.blocks.len: {:?}", obj2.blocks().len());
let mut i = 0; let mut i = 0;
for node in obj2.blocks() { for node in obj2.blocks() {
println!("#{}: {:?}", i, node.id()); log_debug!("#{}: {:?}", i, node.id());
i += 1; i += 1;
} }
@ -699,13 +698,13 @@ mod test {
let obj3 = Object::load(obj.id(), None, &store).unwrap(); let obj3 = Object::load(obj.id(), None, &store).unwrap();
println!("obj3.id: {:?}", obj3.id()); log_debug!("obj3.id: {:?}", obj3.id());
println!("obj3.key: {:?}", obj3.key()); log_debug!("obj3.key: {:?}", obj3.key());
println!("obj3.deps: {:?}", obj3.deps()); log_debug!("obj3.deps: {:?}", obj3.deps());
println!("obj3.blocks.len: {:?}", obj3.blocks().len()); log_debug!("obj3.blocks.len: {:?}", obj3.blocks().len());
let mut i = 0; let mut i = 0;
for node in obj3.blocks() { for node in obj3.blocks() {
println!("#{}: {:?}", i, node.id()); log_debug!("#{}: {:?}", i, node.id());
i += 1; i += 1;
} }
@ -743,7 +742,7 @@ mod test {
content: vec![], content: vec![],
})); }));
let empty_file_ser = serde_bare::to_vec(&empty_file).unwrap(); let empty_file_ser = serde_bare::to_vec(&empty_file).unwrap();
println!("empty file size: {}", empty_file_ser.len()); log_debug!("empty file size: {}", empty_file_ser.len());
let size = store_max_value_size() let size = store_max_value_size()
- EMPTY_BLOCK_SIZE - EMPTY_BLOCK_SIZE
@ -751,7 +750,7 @@ mod test {
- BLOCK_ID_SIZE * deps.len() - BLOCK_ID_SIZE * deps.len()
- empty_file_ser.len() - empty_file_ser.len()
- DATA_VARINT_EXTRA; - DATA_VARINT_EXTRA;
println!("file size: {}", size); log_debug!("file size: {}", size);
let content = ObjectContent::File(File::V0(FileV0 { let content = ObjectContent::File(File::V0(FileV0 {
content_type: "".into(), content_type: "".into(),
@ -759,7 +758,7 @@ mod test {
content: vec![99; size], content: vec![99; size],
})); }));
let content_ser = serde_bare::to_vec(&content).unwrap(); let content_ser = serde_bare::to_vec(&content).unwrap();
println!("content len: {}", content_ser.len()); log_debug!("content len: {}", content_ser.len());
let expiry = Some(2u32.pow(31)); let expiry = Some(2u32.pow(31));
let max_object_size = store_max_value_size(); let max_object_size = store_max_value_size();
@ -776,18 +775,18 @@ mod test {
repo_secret, repo_secret,
); );
println!("root_id: {:?}", object.id()); log_debug!("root_id: {:?}", object.id());
println!("root_key: {:?}", object.key().unwrap()); log_debug!("root_key: {:?}", object.key().unwrap());
println!("nodes.len: {:?}", object.blocks().len()); log_debug!("nodes.len: {:?}", object.blocks().len());
//println!("root: {:?}", tree.root()); //log_debug!("root: {:?}", tree.root());
//println!("nodes: {:?}", object.blocks); //log_debug!("nodes: {:?}", object.blocks);
assert_eq!(object.blocks.len(), 1); assert_eq!(object.blocks.len(), 1);
} }
#[test] #[test]
pub fn test_block_size() { pub fn test_block_size() {
let max_block_size = store_max_value_size(); let max_block_size = store_max_value_size();
println!("max_object_size: {}", max_block_size); log_debug!("max_object_size: {}", max_block_size);
let id = Digest::Blake3Digest32([0u8; 32]); let id = Digest::Blake3Digest32([0u8; 32]);
let key = SymKey::ChaCha20Key([0u8; 32]); let key = SymKey::ChaCha20Key([0u8; 32]);
@ -880,35 +879,35 @@ mod test {
); );
let root_two_ser = serde_bare::to_vec(&root_two).unwrap(); let root_two_ser = serde_bare::to_vec(&root_two).unwrap();
println!( log_debug!(
"range of valid value sizes {} {}", "range of valid value sizes {} {}",
store_valid_value_size(0), store_valid_value_size(0),
store_max_value_size() store_max_value_size()
); );
println!( log_debug!(
"max_data_payload_of_object: {}", "max_data_payload_of_object: {}",
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA
); );
println!( log_debug!(
"max_data_payload_depth_1: {}", "max_data_payload_depth_1: {}",
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA - MAX_DEPS_SIZE max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA - MAX_DEPS_SIZE
); );
println!( log_debug!(
"max_data_payload_depth_2: {}", "max_data_payload_depth_2: {}",
MAX_ARITY_ROOT * MAX_DATA_PAYLOAD_SIZE MAX_ARITY_ROOT * MAX_DATA_PAYLOAD_SIZE
); );
println!( log_debug!(
"max_data_payload_depth_3: {}", "max_data_payload_depth_3: {}",
MAX_ARITY_ROOT * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE MAX_ARITY_ROOT * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE
); );
let max_arity_leaves = (max_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2) let max_arity_leaves = (max_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE); / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
println!("max_arity_leaves: {}", max_arity_leaves); log_debug!("max_arity_leaves: {}", max_arity_leaves);
assert_eq!(max_arity_leaves, MAX_ARITY_LEAVES); assert_eq!(max_arity_leaves, MAX_ARITY_LEAVES);
assert_eq!( assert_eq!(
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA, max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA,
@ -917,15 +916,15 @@ mod test {
let max_arity_root = let max_arity_root =
(max_block_size - EMPTY_BLOCK_SIZE - MAX_DEPS_SIZE - BIG_VARINT_EXTRA * 2) (max_block_size - EMPTY_BLOCK_SIZE - MAX_DEPS_SIZE - BIG_VARINT_EXTRA * 2)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE); / (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
println!("max_arity_root: {}", max_arity_root); log_debug!("max_arity_root: {}", max_arity_root);
assert_eq!(max_arity_root, MAX_ARITY_ROOT); assert_eq!(max_arity_root, MAX_ARITY_ROOT);
println!("store_max_value_size: {}", leaf_full_data_ser.len()); log_debug!("store_max_value_size: {}", leaf_full_data_ser.len());
assert_eq!(leaf_full_data_ser.len(), max_block_size); assert_eq!(leaf_full_data_ser.len(), max_block_size);
println!("leaf_empty: {}", leaf_empty_ser.len()); log_debug!("leaf_empty: {}", leaf_empty_ser.len());
assert_eq!(leaf_empty_ser.len(), EMPTY_BLOCK_SIZE); assert_eq!(leaf_empty_ser.len(), EMPTY_BLOCK_SIZE);
println!("root_depsref: {}", root_depsref_ser.len()); log_debug!("root_depsref: {}", root_depsref_ser.len());
assert_eq!(root_depsref_ser.len(), EMPTY_ROOT_SIZE_DEPSREF); assert_eq!(root_depsref_ser.len(), EMPTY_ROOT_SIZE_DEPSREF);
println!("internal_max: {}", internal_max_ser.len()); log_debug!("internal_max: {}", internal_max_ser.len());
assert_eq!( assert_eq!(
internal_max_ser.len(), internal_max_ser.len(),
EMPTY_BLOCK_SIZE EMPTY_BLOCK_SIZE
@ -933,22 +932,22 @@ mod test {
+ MAX_ARITY_LEAVES * (BLOCK_ID_SIZE + BLOCK_KEY_SIZE) + MAX_ARITY_LEAVES * (BLOCK_ID_SIZE + BLOCK_KEY_SIZE)
); );
assert!(internal_max_ser.len() < max_block_size); assert!(internal_max_ser.len() < max_block_size);
println!("internal_one: {}", internal_one_ser.len()); log_debug!("internal_one: {}", internal_one_ser.len());
assert_eq!( assert_eq!(
internal_one_ser.len(), internal_one_ser.len(),
EMPTY_BLOCK_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE EMPTY_BLOCK_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE
); );
println!("internal_two: {}", internal_two_ser.len()); log_debug!("internal_two: {}", internal_two_ser.len());
assert_eq!( assert_eq!(
internal_two_ser.len(), internal_two_ser.len(),
EMPTY_BLOCK_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE EMPTY_BLOCK_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE
); );
println!("root_one: {}", root_one_ser.len()); log_debug!("root_one: {}", root_one_ser.len());
assert_eq!( assert_eq!(
root_one_ser.len(), root_one_ser.len(),
EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE
); );
println!("root_two: {}", root_two_ser.len()); log_debug!("root_two: {}", root_two_ser.len());
assert_eq!( assert_eq!(
root_two_ser.len(), root_two_ser.len(),
EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE
@ -961,9 +960,9 @@ mod test {
// let arity_512: usize = // let arity_512: usize =
// (object_size_512 - 8 * OBJECT_ID_SIZE) / (OBJECT_ID_SIZE + OBJECT_KEY_SIZE); // (object_size_512 - 8 * OBJECT_ID_SIZE) / (OBJECT_ID_SIZE + OBJECT_KEY_SIZE);
// println!("1-page object_size: {}", object_size_1); // log_debug!("1-page object_size: {}", object_size_1);
// println!("512-page object_size: {}", object_size_512); // log_debug!("512-page object_size: {}", object_size_512);
// println!("max arity of 1-page object: {}", arity_1); // log_debug!("max arity of 1-page object: {}", arity_1);
// println!("max arity of 512-page object: {}", arity_512); // log_debug!("max arity of 512-page object: {}", arity_512);
} }
} }

@ -27,12 +27,12 @@ pub fn generate_null_keypair() -> (PrivKey, PubKey) {
secret: sk, secret: sk,
}; };
// println!( // log_debug!(
// "private key: ({}) {:?}", // "private key: ({}) {:?}",
// keypair.secret.as_bytes().len(), // keypair.secret.as_bytes().len(),
// keypair.secret.as_bytes() // keypair.secret.as_bytes()
// ); // );
// println!( // log_debug!(
// "public key: ({}) {:?}", // "public key: ({}) {:?}",
// keypair.public.as_bytes().len(), // keypair.public.as_bytes().len(),
// keypair.public.as_bytes() // keypair.public.as_bytes()
@ -45,12 +45,12 @@ pub fn generate_null_keypair() -> (PrivKey, PubKey) {
} }
pub fn keypair_from_ed(secret: SecretKey, public: PublicKey) -> (PrivKey, PubKey) { pub fn keypair_from_ed(secret: SecretKey, public: PublicKey) -> (PrivKey, PubKey) {
// println!( // log_debug!(
// "private key: ({}) {:?}", // "private key: ({}) {:?}",
// keypair.secret.as_bytes().len(), // keypair.secret.as_bytes().len(),
// keypair.secret.as_bytes() // keypair.secret.as_bytes()
// ); // );
// println!( // log_debug!(
// "public key: ({}) {:?}", // "public key: ({}) {:?}",
// keypair.public.as_bytes().len(), // keypair.public.as_bytes().len(),
// keypair.public.as_bytes() // keypair.public.as_bytes()
@ -94,12 +94,12 @@ pub fn verify(content: &Vec<u8>, sig: Sig, pub_key: PubKey) -> Result<(), NgErro
pub fn generate_keypair() -> (PrivKey, PubKey) { pub fn generate_keypair() -> (PrivKey, PubKey) {
let mut csprng = OsRng {}; let mut csprng = OsRng {};
let keypair: Keypair = Keypair::generate(&mut csprng); let keypair: Keypair = Keypair::generate(&mut csprng);
// println!( // log_debug!(
// "private key: ({}) {:?}", // "private key: ({}) {:?}",
// keypair.secret.as_bytes().len(), // keypair.secret.as_bytes().len(),
// keypair.secret.as_bytes() // keypair.secret.as_bytes()
// ); // );
// println!( // log_debug!(
// "public key: ({}) {:?}", // "public key: ({}) {:?}",
// keypair.public.as_bytes().len(), // keypair.public.as_bytes().len(),
// keypair.public.as_bytes() // keypair.public.as_bytes()

@ -9,7 +9,6 @@ repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies] [dependencies]
p2p-repo = { path = "../p2p-repo" } p2p-repo = { path = "../p2p-repo" }
debug_print = "1.0.0"
serde = { version = "1.0.142", features = ["derive"] } serde = { version = "1.0.142", features = ["derive"] }
serde_bare = "0.5.0" serde_bare = "0.5.0"
tempfile = "3" tempfile = "3"

@ -12,7 +12,7 @@ use p2p_repo::store::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::*; use p2p_repo::utils::*;
use debug_print::*; use p2p_repo::log::*;
use std::path::Path; use std::path::Path;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::RwLockReadGuard; use std::sync::RwLockReadGuard;
@ -383,7 +383,7 @@ impl LmdbKCVStore {
.unwrap(); .unwrap();
let env = shared_rkv.read().unwrap(); let env = shared_rkv.read().unwrap();
println!("created env with LMDB Version: {}", env.version()); log_info!("created env with LMDB Version: {}", env.version());
let main_store = env.open_multi("main", StoreOptions::create()).unwrap(); let main_store = env.open_multi("main", StoreOptions::create()).unwrap();

@ -1,7 +1,7 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers // Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved. // All rights reserved.
// Licensed under the Apache License, Version 2.0 // Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0> // <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, // or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such // at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except // notice may not be copied, modified, or distributed except
@ -11,7 +11,7 @@ use p2p_repo::store::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::*; use p2p_repo::utils::*;
use debug_print::*; use p2p_repo::log::*;
use std::path::Path; use std::path::Path;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
@ -99,7 +99,7 @@ impl RepoStore for LmdbRepoStore {
Err(_e) => Err(StorageError::InvalidValue), Err(_e) => Err(StorageError::InvalidValue),
Ok(o) => { Ok(o) => {
if o.id() != *block_id { if o.id() != *block_id {
debug_println!( log_debug!(
"Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}", "Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}",
block_id, block_id,
o.id(), o.id(),
@ -114,7 +114,7 @@ impl RepoStore for LmdbRepoStore {
} }
} }
/// Adds a block in the storage backend. /// Adds a block in the storage backend.
/// The block is persisted to disk. /// The block is persisted to disk.
/// Returns the BlockId of the Block. /// Returns the BlockId of the Block.
fn put(&self, block: &Block) -> Result<BlockId, StorageError> { fn put(&self, block: &Block) -> Result<BlockId, StorageError> {
@ -201,7 +201,6 @@ impl RepoStore for LmdbRepoStore {
writer.commit().unwrap(); writer.commit().unwrap();
Ok((block, slice.len())) Ok((block, slice.len()))
} }
} }
impl LmdbRepoStore { impl LmdbRepoStore {
@ -217,7 +216,7 @@ impl LmdbRepoStore {
.unwrap(); .unwrap();
let env = shared_rkv.read().unwrap(); let env = shared_rkv.read().unwrap();
println!( log_debug!(
"created env with LMDB Version: {} key: {}", "created env with LMDB Version: {} key: {}",
env.version(), env.version(),
hex::encode(&key) hex::encode(&key)
@ -352,7 +351,7 @@ impl LmdbRepoStore {
if !meta.pin { if !meta.pin {
// we add an entry to recently_used_store with now // we add an entry to recently_used_store with now
println!("adding to LRU"); log_debug!("adding to LRU");
self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap(); self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap();
} }
} }
@ -362,7 +361,7 @@ impl LmdbRepoStore {
synced: true, synced: true,
last_used: now, last_used: now,
}; };
println!("adding to LRU also"); log_debug!("adding to LRU also");
self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap(); self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap();
} }
} }
@ -396,7 +395,7 @@ impl LmdbRepoStore {
while let Some(Ok(mut sub_iter)) = iter.next() { while let Some(Ok(mut sub_iter)) = iter.next() {
while let Some(Ok(k)) = sub_iter.next() { while let Some(Ok(k)) = sub_iter.next() {
//println!("removing {:?} {:?}", k.0, k.1); //log_debug!("removing {:?} {:?}", k.0, k.1);
let block_id = serde_bare::from_slice::<ObjectId>(k.1).unwrap(); let block_id = serde_bare::from_slice::<ObjectId>(k.1).unwrap();
block_ids.push(block_id); block_ids.push(block_id);
} }
@ -430,7 +429,7 @@ impl LmdbRepoStore {
} }
for block_id in block_ids { for block_id in block_ids {
let (block, block_size) = self.del(&block_id).unwrap(); let (block, block_size) = self.del(&block_id).unwrap();
println!("removed {:?}", block_id); log_debug!("removed {:?}", block_id);
total += block_size; total += block_size;
if total >= size { if total >= size {
break; break;
@ -468,25 +467,25 @@ impl LmdbRepoStore {
fn list_all(&self) { fn list_all(&self) {
let lock = self.environment.read().unwrap(); let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap(); let reader = lock.read().unwrap();
println!("MAIN"); log_debug!("MAIN");
let mut iter = self.main_store.iter_start(&reader).unwrap(); let mut iter = self.main_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter.next() { while let Some(Ok(entry)) = iter.next() {
println!("{:?} {:?}", entry.0, entry.1) log_debug!("{:?} {:?}", entry.0, entry.1)
} }
println!("META"); log_debug!("META");
let mut iter2 = self.meta_store.iter_start(&reader).unwrap(); let mut iter2 = self.meta_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter2.next() { while let Some(Ok(entry)) = iter2.next() {
println!("{:?} {:?}", entry.0, entry.1) log_debug!("{:?} {:?}", entry.0, entry.1)
} }
println!("EXPIRY"); log_debug!("EXPIRY");
let mut iter3 = self.expiry_store.iter_start(&reader).unwrap(); let mut iter3 = self.expiry_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter3.next() { while let Some(Ok(entry)) = iter3.next() {
println!("{:?} {:?}", entry.0, entry.1) log_debug!("{:?} {:?}", entry.0, entry.1)
} }
println!("LRU"); log_debug!("LRU");
let mut iter4 = self.recently_used_store.iter_start(&reader).unwrap(); let mut iter4 = self.recently_used_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter4.next() { while let Some(Ok(entry)) = iter4.next() {
println!("{:?} {:?}", entry.0, entry.1) log_debug!("{:?} {:?}", entry.0, entry.1)
} }
} }
} }
@ -494,6 +493,7 @@ impl LmdbRepoStore {
mod test { mod test {
use crate::repo_store::LmdbRepoStore; use crate::repo_store::LmdbRepoStore;
use p2p_repo::log::*;
use p2p_repo::store::*; use p2p_repo::store::*;
use p2p_repo::types::*; use p2p_repo::types::*;
use p2p_repo::utils::*; use p2p_repo::utils::*;
@ -511,7 +511,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap(); let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32]; let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap(); fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key); let mut store = LmdbRepoStore::open(root.path(), key);
let mut now = now_timestamp(); let mut now = now_timestamp();
now -= 200; now -= 200;
@ -525,14 +525,14 @@ mod test {
None, None,
); );
let block_id = store.put(&block).unwrap(); let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", x, block_id); log_debug!("#{} -> objId {:?}", x, block_id);
store store
.has_been_synced(&block_id, Some(now + x as u32)) .has_been_synced(&block_id, Some(now + x as u32))
.unwrap(); .unwrap();
} }
let ret = store.remove_least_used(200); let ret = store.remove_least_used(200);
println!("removed {}", ret); log_debug!("removed {}", ret);
assert_eq!(ret, 208) assert_eq!(ret, 208)
//store.list_all(); //store.list_all();
@ -544,7 +544,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap(); let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32]; let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap(); fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key); let mut store = LmdbRepoStore::open(root.path(), key);
let mut now = now_timestamp(); let mut now = now_timestamp();
now -= 200; now -= 200;
@ -558,7 +558,7 @@ mod test {
None, None,
); );
let obj_id = store.put(&block).unwrap(); let obj_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", x, obj_id); log_debug!("#{} -> objId {:?}", x, obj_id);
store.set_pin(&obj_id, true).unwrap(); store.set_pin(&obj_id, true).unwrap();
store store
.has_been_synced(&obj_id, Some(now + x as u32)) .has_been_synced(&obj_id, Some(now + x as u32))
@ -566,7 +566,7 @@ mod test {
} }
let ret = store.remove_least_used(200); let ret = store.remove_least_used(200);
println!("removed {}", ret); log_debug!("removed {}", ret);
assert_eq!(ret, 0); assert_eq!(ret, 0);
store.list_all(); store.list_all();
@ -601,7 +601,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap(); let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32]; let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap(); fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key); let mut store = LmdbRepoStore::open(root.path(), key);
let now = now_timestamp(); let now = now_timestamp();
@ -619,7 +619,7 @@ mod test {
now + 10, now + 10,
]; ];
let mut block_ids: Vec<ObjectId> = Vec::with_capacity(11); let mut block_ids: Vec<ObjectId> = Vec::with_capacity(11);
println!("now {}", now); log_debug!("now {}", now);
let mut i = 0u8; let mut i = 0u8;
for expiry in list { for expiry in list {
@ -632,7 +632,7 @@ mod test {
None, None,
); );
let block_id = store.put(&block).unwrap(); let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", i, block_id); log_debug!("#{} -> objId {:?}", i, block_id);
block_ids.push(block_id); block_ids.push(block_id);
i += 1; i += 1;
} }
@ -655,7 +655,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap(); let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32]; let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap(); fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key); let mut store = LmdbRepoStore::open(root.path(), key);
let now = now_timestamp(); let now = now_timestamp();
@ -668,7 +668,7 @@ mod test {
now - 2, //#5 should be removed, and above now - 2, //#5 should be removed, and above
]; ];
let mut block_ids: Vec<ObjectId> = Vec::with_capacity(6); let mut block_ids: Vec<ObjectId> = Vec::with_capacity(6);
println!("now {}", now); log_debug!("now {}", now);
let mut i = 0u8; let mut i = 0u8;
for expiry in list { for expiry in list {
@ -681,7 +681,7 @@ mod test {
None, None,
); );
let block_id = store.put(&block).unwrap(); let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", i, block_id); log_debug!("#{} -> objId {:?}", i, block_id);
block_ids.push(block_id); block_ids.push(block_id);
i += 1; i += 1;
} }
@ -702,7 +702,7 @@ mod test {
let root = Builder::new().prefix(path_str).tempdir().unwrap(); let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32]; let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap(); fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let store = LmdbRepoStore::open(root.path(), key); let store = LmdbRepoStore::open(root.path(), key);
store.remove_expired().unwrap(); store.remove_expired().unwrap();
} }
@ -715,7 +715,7 @@ mod test {
let key: [u8; 32] = [0; 32]; let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap(); fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key); let mut store = LmdbRepoStore::open(root.path(), key);
@ -730,7 +730,7 @@ mod test {
let block_id = store.put(&block).unwrap(); let block_id = store.put(&block).unwrap();
assert_eq!(block_id, block.id()); assert_eq!(block_id, block.id());
println!("ObjectId: {:?}", block_id); log_debug!("ObjectId: {:?}", block_id);
assert_eq!( assert_eq!(
block_id, block_id,
Digest::Blake3Digest32([ Digest::Blake3Digest32([
@ -741,7 +741,7 @@ mod test {
let block_res = store.get(&block_id).unwrap(); let block_res = store.get(&block_id).unwrap();
println!("Block: {:?}", block_res); log_debug!("Block: {:?}", block_res);
assert_eq!(block_res.id(), block.id()); assert_eq!(block_res.id(), block.id());
} }
@ -755,7 +755,7 @@ mod test {
{ {
fs::create_dir_all(root.path()).unwrap(); fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap()); log_debug!("{}", root.path().to_str().unwrap());
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap(); let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
let shared_rkv = manager let shared_rkv = manager
@ -766,7 +766,7 @@ mod test {
.unwrap(); .unwrap();
let env = shared_rkv.read().unwrap(); let env = shared_rkv.read().unwrap();
println!("LMDB Version: {}", env.version()); log_debug!("LMDB Version: {}", env.version());
let store = env.open_single("testdb", StoreOptions::create()).unwrap(); let store = env.open_single("testdb", StoreOptions::create()).unwrap();
@ -823,12 +823,12 @@ mod test {
let reader = env.read().expect("reader"); let reader = env.read().expect("reader");
let stat = store.stat(&reader).unwrap(); let stat = store.stat(&reader).unwrap();
println!("LMDB stat page_size : {}", stat.page_size()); log_debug!("LMDB stat page_size : {}", stat.page_size());
println!("LMDB stat depth : {}", stat.depth()); log_debug!("LMDB stat depth : {}", stat.depth());
println!("LMDB stat branch_pages : {}", stat.branch_pages()); log_debug!("LMDB stat branch_pages : {}", stat.branch_pages());
println!("LMDB stat leaf_pages : {}", stat.leaf_pages()); log_debug!("LMDB stat leaf_pages : {}", stat.leaf_pages());
println!("LMDB stat overflow_pages : {}", stat.overflow_pages()); log_debug!("LMDB stat overflow_pages : {}", stat.overflow_pages());
println!("LMDB stat entries : {}", stat.entries()); log_debug!("LMDB stat entries : {}", stat.entries());
} }
// { // {
@ -838,17 +838,17 @@ mod test {
// let reader = env.read().expect("reader"); // let reader = env.read().expect("reader");
// // Keys are `AsRef<u8>`, and the return value is `Result<Option<Value>, StoreError>`. // // Keys are `AsRef<u8>`, and the return value is `Result<Option<Value>, StoreError>`.
// // println!("Get int {:?}", store.get(&reader, "int").unwrap()); // // log_debug!("Get int {:?}", store.get(&reader, "int").unwrap());
// // println!("Get uint {:?}", store.get(&reader, "uint").unwrap()); // // log_debug!("Get uint {:?}", store.get(&reader, "uint").unwrap());
// // println!("Get float {:?}", store.get(&reader, "float").unwrap()); // // log_debug!("Get float {:?}", store.get(&reader, "float").unwrap());
// // println!("Get instant {:?}", store.get(&reader, "instant").unwrap()); // // log_debug!("Get instant {:?}", store.get(&reader, "instant").unwrap());
// // println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap()); // // log_debug!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
// // println!("Get string {:?}", store.get(&reader, "string").unwrap()); // // log_debug!("Get string {:?}", store.get(&reader, "string").unwrap());
// // println!("Get json {:?}", store.get(&reader, "json").unwrap()); // // log_debug!("Get json {:?}", store.get(&reader, "json").unwrap());
// println!("Get blob {:?}", store.get(&reader, "blob").unwrap()); // log_debug!("Get blob {:?}", store.get(&reader, "blob").unwrap());
// // Retrieving a non-existent value returns `Ok(None)`. // // Retrieving a non-existent value returns `Ok(None)`.
// println!( // log_debug!(
// "Get non-existent value {:?}", // "Get non-existent value {:?}",
// store.get(&reader, "non-existent").unwrap() // store.get(&reader, "non-existent").unwrap()
// ); // );
@ -864,7 +864,7 @@ mod test {
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap(); // store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// writer.abort(); // writer.abort();
// let reader = env.read().expect("reader"); // let reader = env.read().expect("reader");
// println!( // log_debug!(
// "It should be None! ({:?})", // "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap() // store.get(&reader, "foo").unwrap()
// ); // );
@ -879,7 +879,7 @@ mod test {
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap(); // store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// } // }
// let reader = env.read().expect("reader"); // let reader = env.read().expect("reader");
// println!( // log_debug!(
// "It should be None! ({:?})", // "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap() // store.get(&reader, "foo").unwrap()
// ); // );
@ -898,26 +898,26 @@ mod test {
// // In the code above, "foo" and "bar" were put into the store, then "foo" was // // In the code above, "foo" and "bar" were put into the store, then "foo" was
// // deleted so only "bar" will return a result when the database is queried via the // // deleted so only "bar" will return a result when the database is queried via the
// // writer. // // writer.
// println!( // log_debug!(
// "It should be None! ({:?})", // "It should be None! ({:?})",
// store.get(&writer, "foo").unwrap() // store.get(&writer, "foo").unwrap()
// ); // );
// println!("Get bar ({:?})", store.get(&writer, "bar").unwrap()); // log_debug!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
// // But a reader won't see that change until the write transaction is committed. // // But a reader won't see that change until the write transaction is committed.
// { // {
// let reader = env.read().expect("reader"); // let reader = env.read().expect("reader");
// println!("Get foo {:?}", store.get(&reader, "foo").unwrap()); // log_debug!("Get foo {:?}", store.get(&reader, "foo").unwrap());
// println!("Get bar {:?}", store.get(&reader, "bar").unwrap()); // log_debug!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// } // }
// writer.commit().unwrap(); // writer.commit().unwrap();
// { // {
// let reader = env.read().expect("reader"); // let reader = env.read().expect("reader");
// println!( // log_debug!(
// "It should be None! ({:?})", // "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap() // store.get(&reader, "foo").unwrap()
// ); // );
// println!("Get bar {:?}", store.get(&reader, "bar").unwrap()); // log_debug!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// } // }
// // Committing a transaction consumes the writer, preventing you from reusing it by // // Committing a transaction consumes the writer, preventing you from reusing it by
@ -943,11 +943,11 @@ mod test {
// // { // // {
// // let reader = env.read().expect("reader"); // // let reader = env.read().expect("reader");
// // println!( // // log_debug!(
// // "It should be None! ({:?})", // // "It should be None! ({:?})",
// // store.get(&reader, "foo").unwrap() // // store.get(&reader, "foo").unwrap()
// // ); // // );
// // println!( // // log_debug!(
// // "It should be None! ({:?})", // // "It should be None! ({:?})",
// // store.get(&reader, "bar").unwrap() // // store.get(&reader, "bar").unwrap()
// // ); // // );
@ -956,17 +956,17 @@ mod test {
let stat = env.stat().unwrap(); let stat = env.stat().unwrap();
let info = env.info().unwrap(); let info = env.info().unwrap();
println!("LMDB info map_size : {}", info.map_size()); log_debug!("LMDB info map_size : {}", info.map_size());
println!("LMDB info last_pgno : {}", info.last_pgno()); log_debug!("LMDB info last_pgno : {}", info.last_pgno());
println!("LMDB info last_txnid : {}", info.last_txnid()); log_debug!("LMDB info last_txnid : {}", info.last_txnid());
println!("LMDB info max_readers : {}", info.max_readers()); log_debug!("LMDB info max_readers : {}", info.max_readers());
println!("LMDB info num_readers : {}", info.num_readers()); log_debug!("LMDB info num_readers : {}", info.num_readers());
println!("LMDB stat page_size : {}", stat.page_size()); log_debug!("LMDB stat page_size : {}", stat.page_size());
println!("LMDB stat depth : {}", stat.depth()); log_debug!("LMDB stat depth : {}", stat.depth());
println!("LMDB stat branch_pages : {}", stat.branch_pages()); log_debug!("LMDB stat branch_pages : {}", stat.branch_pages());
println!("LMDB stat leaf_pages : {}", stat.leaf_pages()); log_debug!("LMDB stat leaf_pages : {}", stat.leaf_pages());
println!("LMDB stat overflow_pages : {}", stat.overflow_pages()); log_debug!("LMDB stat overflow_pages : {}", stat.overflow_pages());
println!("LMDB stat entries : {}", stat.entries()); log_debug!("LMDB stat entries : {}", stat.entries());
} }
// We reopen the env and data to see if it was well saved to disk. // We reopen the env and data to see if it was well saved to disk.
{ {
@ -979,13 +979,13 @@ mod test {
.unwrap(); .unwrap();
let env = shared_rkv.read().unwrap(); let env = shared_rkv.read().unwrap();
println!("LMDB Version: {}", env.version()); log_debug!("LMDB Version: {}", env.version());
let mut store = env.open_single("testdb", StoreOptions::default()).unwrap(); //StoreOptions::create() let mut store = env.open_single("testdb", StoreOptions::default()).unwrap(); //StoreOptions::create()
{ {
let reader = env.read().expect("reader"); let reader = env.read().expect("reader");
println!( log_debug!(
"It should be baz! ({:?})", "It should be baz! ({:?})",
store.get(&reader, "bar").unwrap() store.get(&reader, "bar").unwrap()
); );

Loading…
Cancel
Save