we have a local-first E2EE dropbox

master
Niko PLP 2 weeks ago
parent fe4a41b3c2
commit c8b7a04ab4
  1. Cargo.lock (7)
  2. nextgraph/examples/in_memory.rs (4)
  3. nextgraph/examples/open.rs (4)
  4. nextgraph/examples/persistent.rs (4)
  5. nextgraph/src/local_broker.rs (177)
  6. ng-app/src-tauri/Cargo.toml (1)
  7. ng-app/src-tauri/src/lib.rs (127)
  8. ng-app/src/api.ts (32)
  9. ng-app/src/lib/Home.svelte (3)
  10. ng-app/src/lib/Test.svelte (221)
  11. ng-app/src/routes/Test.svelte (20)
  12. ng-app/src/store.ts (36)
  13. ng-broker/src/rocksdb_server_storage.rs (59)
  14. ng-broker/src/server_broker.rs (119)
  15. ng-net/Cargo.toml (1)
  16. ng-net/src/actors/client/blocks_exist.rs (102)
  17. ng-net/src/actors/client/blocks_get.rs (129)
  18. ng-net/src/actors/client/blocks_put.rs (79)
  19. ng-net/src/actors/client/event.rs (36)
  20. ng-net/src/actors/client/mod.rs (6)
  21. ng-net/src/actors/client/pin_repo.rs (25)
  22. ng-net/src/actors/client/topic_sub.rs (17)
  23. ng-net/src/broker.rs (237)
  24. ng-net/src/connection.rs (64)
  25. ng-net/src/server_broker.rs (17)
  26. ng-net/src/types.rs (143)
  27. ng-repo/src/block_storage.rs (9)
  28. ng-repo/src/commit.rs (18)
  29. ng-repo/src/errors.rs (31)
  30. ng-repo/src/file.rs (185)
  31. ng-repo/src/repo.rs (13)
  32. ng-repo/src/store.rs (9)
  33. ng-repo/src/types.rs (72)
  34. ng-repo/src/utils.rs (9)
  35. ng-sdk-js/js/browser.js (11)
  36. ng-sdk-js/js/node.js (4)
  37. ng-sdk-js/src/lib.rs (135)
  38. ng-storage-rocksdb/src/block_storage.rs (9)
  39. ng-verifier/Cargo.toml (1)
  40. ng-verifier/src/commits/mod.rs (142)
  41. ng-verifier/src/lib.rs (2)
  42. ng-verifier/src/request_processor.rs (220)
  43. ng-verifier/src/rocksdb_user_storage.rs (11)
  44. ng-verifier/src/site.rs (14)
  45. ng-verifier/src/types.rs (128)
  46. ng-verifier/src/user_storage/storage.rs (110)
  47. ng-verifier/src/verifier.rs (670)
  48. ng-wallet/src/lib.rs (22)
  49. ng-wallet/src/types.rs (20)
  50. ngcli/src/main.rs (15)
  51. ngd/src/main.rs (24)
  52. ngone/src/main.rs (6)

Cargo.lock (generated, 7)

@ -384,9 +384,9 @@ dependencies = [
[[package]]
name = "async-recursion"
version = "1.0.4"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba"
checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
dependencies = [
"proc-macro2",
"quote",
@ -3255,6 +3255,7 @@ dependencies = [
"ng-repo",
"ng-wallet",
"serde",
"serde_bytes",
"serde_json",
"tauri",
"tauri-build",
@ -3318,6 +3319,7 @@ name = "ng-net"
version = "0.1.0"
dependencies = [
"async-broadcast 0.4.1",
"async-recursion",
"async-std",
"async-trait",
"base64-url",
@ -3421,6 +3423,7 @@ name = "ng-verifier"
version = "0.1.0"
dependencies = [
"async-std",
"async-trait",
"automerge",
"blake3",
"chacha20",

@ -8,8 +8,8 @@
// according to those terms.
use nextgraph::local_broker::{
doc_fetch, init_local_broker, session_start, session_stop, user_connect, user_disconnect,
wallet_close, wallet_create_v0, wallet_get, wallet_get_file, wallet_import,
app_request, app_request_stream, init_local_broker, session_start, session_stop, user_connect,
user_disconnect, wallet_close, wallet_create_v0, wallet_get, wallet_get_file, wallet_import,
wallet_open_with_pazzle_words, wallet_read_file, wallet_was_opened, LocalBrokerConfig,
SessionConfig,
};

@ -8,8 +8,8 @@
// according to those terms.
use nextgraph::local_broker::{
doc_fetch, init_local_broker, session_start, session_stop, user_connect, user_disconnect,
wallet_close, wallet_create_v0, wallet_get, wallet_get_file, wallet_import,
app_request, app_request_stream, init_local_broker, session_start, session_stop, user_connect,
user_disconnect, wallet_close, wallet_create_v0, wallet_get, wallet_get_file, wallet_import,
wallet_open_with_pazzle, wallet_open_with_pazzle_words, wallet_read_file, wallet_was_opened,
LocalBrokerConfig, SessionConfig,
};

@ -8,8 +8,8 @@
// according to those terms.
use nextgraph::local_broker::{
doc_fetch, init_local_broker, session_start, session_stop, user_connect, user_disconnect,
wallet_close, wallet_create_v0, wallet_get, wallet_get_file, wallet_import,
app_request, app_request_stream, init_local_broker, session_start, session_stop, user_connect,
user_disconnect, wallet_close, wallet_create_v0, wallet_get, wallet_get_file, wallet_import,
wallet_open_with_pazzle_words, wallet_read_file, wallet_was_opened, LocalBrokerConfig,
SessionConfig,
};

@ -23,7 +23,7 @@ use once_cell::sync::Lazy;
use serde_bare::to_vec;
use serde_json::json;
use std::collections::HashMap;
use std::fs::{read, write, File, OpenOptions};
use std::fs::{read, remove_file, write, File, OpenOptions};
use std::path::PathBuf;
use zeroize::{Zeroize, ZeroizeOnDrop};
@ -54,6 +54,7 @@ pub struct JsStorageConfig {
pub session_read: Arc<Box<JsStorageReadFn>>,
pub session_write: Arc<Box<JsStorageWriteFn>>,
pub session_del: Arc<Box<JsStorageDelFn>>,
pub clear: Arc<Box<JsCallback>>,
pub is_browser: bool,
}
@ -111,6 +112,7 @@ impl JsStorageConfig {
outbox_read_function: Box::new(
move |peer_id: PubKey| -> Result<Vec<Vec<u8>>, NgError> {
let start_key = format!("ng_outboxes@{}@start", peer_id);
//log_info!("search start key {}", start_key);
let res = (session_read4)(start_key.clone());
let _start = match res {
Err(_) => return Err(NgError::JsStorageKeyNotFound),
@ -123,6 +125,7 @@ impl JsStorageConfig {
loop {
let idx_str = format!("{:05}", idx);
let str = format!("ng_outboxes@{}@{idx_str}", peer_id);
//log_info!("search key {}", str);
let res = (session_read4)(str.clone());
let res = match res {
Err(_) => break,
@ -369,7 +372,6 @@ impl fmt::Debug for OpenedWallet {
}
}
#[derive(Debug)]
struct LocalBroker {
pub config: LocalBrokerConfig,
@ -382,12 +384,29 @@ struct LocalBroker {
pub opened_sessions: HashMap<UserId, u64>,
pub opened_sessions_list: Vec<Option<Session>>,
tauri_streams: HashMap<String, CancelFn>,
}
impl fmt::Debug for LocalBroker {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "LocalBroker.\nconfig {:?}", self.config)?;
writeln!(f, "wallets {:?}", self.wallets)?;
writeln!(f, "opened_wallets {:?}", self.opened_wallets)?;
writeln!(f, "sessions {:?}", self.sessions)?;
writeln!(f, "opened_sessions {:?}", self.opened_sessions)?;
writeln!(f, "opened_sessions_list {:?}", self.opened_sessions_list)
}
}
// used to deliver events to the verifier on Clients, or on Core brokers that have Verifiers attached.
#[async_trait::async_trait]
impl ILocalBroker for LocalBroker {
async fn deliver(&mut self, event: Event) {}
async fn deliver(&mut self, event: Event, overlay: OverlayId, user_id: UserId) {
if let Some(session) = self.get_mut_session_for_user(&user_id) {
session.verifier.deliver(event, overlay).await;
}
}
}
// this is used if an Actor does a BROKER.local_broker.respond
@ -420,6 +439,21 @@ impl LocalBroker {
// }
// }
/// helper function to store the cancel function of a tauri stream in order to be able to cancel it later on
/// only used in Tauri, not used in the JS SDK
fn tauri_stream_add(&mut self, stream_id: String, cancel: CancelFn) {
self.tauri_streams.insert(stream_id, cancel);
}
/// helper function to cancel a tauri stream
/// only used in Tauri, not used in the JS SDK
fn tauri_stream_cancel(&mut self, stream_id: String) {
let s = self.tauri_streams.remove(&stream_id);
if let Some(cancel) = s {
cancel();
}
}
fn get_mut_session_for_user(&mut self, user: &UserId) -> Option<&mut Session> {
match self.opened_sessions.get(user) {
Some(idx) => self.opened_sessions_list[*idx as usize].as_mut(),
@ -604,7 +638,7 @@ impl LocalBroker {
let credentials = match opened_wallet.wallet.individual_site(&user_id) {
Some(creds) => creds,
None => match user_priv_key {
Some(user_pk) => (user_pk, None, None),
Some(user_pk) => (user_pk, None, None, None, None),
None => return Err(NgError::NotFound),
},
};
@ -745,6 +779,8 @@ impl LocalBroker {
user_priv_key: credentials.0,
private_store_read_cap: credentials.1,
private_store_id: credentials.2,
protected_store_id: credentials.3,
public_store_id: credentials.4,
},
block_storage,
)?;
@ -794,7 +830,7 @@ impl LocalBroker {
let lws_ser = LocalWalletStorage::v0_to_vec(&wallets_to_be_saved);
let r = write(path.clone(), &lws_ser);
if r.is_err() {
log_debug!("write error {:?} {}", path, r.unwrap_err());
log_err!("write error {:?} {}", path, r.unwrap_err());
return Err(NgError::IoError);
}
}
@ -815,11 +851,17 @@ async fn init_(config: LocalBrokerConfig) -> Result<Arc<RwLock<LocalBroker>>, Ng
// load the wallets and sessions from disk
let mut path = base_path.clone();
path.push("wallets");
let map_ser = read(path);
let map_ser = read(path.clone());
if map_ser.is_ok() {
let wallets = LocalWalletStorage::v0_from_vec(&map_ser.unwrap())?;
let LocalWalletStorage::V0(wallets) = wallets;
wallets
let wallets = LocalWalletStorage::v0_from_vec(&map_ser.unwrap());
if wallets.is_err() {
log_err!("Load LocalWalletStorage error: {:?}", wallets.unwrap_err());
let _ = remove_file(path);
HashMap::new()
} else {
let LocalWalletStorage::V0(wallets) = wallets.unwrap();
wallets
}
} else {
HashMap::new()
}
@ -829,11 +871,26 @@ async fn init_(config: LocalBrokerConfig) -> Result<Arc<RwLock<LocalBroker>>, Ng
match (js_storage_config.local_read)("ng_wallets".to_string()) {
Err(_) => HashMap::new(),
Ok(wallets_string) => {
let map_ser = base64_url::decode(&wallets_string)
.map_err(|_| NgError::SerializationError)?;
let wallets: LocalWalletStorage = serde_bare::from_slice(&map_ser)?;
let LocalWalletStorage::V0(v0) = wallets;
v0
match base64_url::decode(&wallets_string)
.map_err(|_| NgError::SerializationError)
{
Err(e) => {
log_err!("Load wallets error: {:?}", e);
(js_storage_config.clear)();
HashMap::new()
}
Ok(map_ser) => match serde_bare::from_slice(&map_ser) {
Err(e) => {
log_err!("Load LocalWalletStorage error: {:?}", e);
(js_storage_config.clear)();
HashMap::new()
}
Ok(wallets) => {
let LocalWalletStorage::V0(v0) = wallets;
v0
}
},
}
}
}
}
@ -846,14 +903,16 @@ async fn init_(config: LocalBrokerConfig) -> Result<Arc<RwLock<LocalBroker>>, Ng
sessions: HashMap::new(),
opened_sessions: HashMap::new(),
opened_sessions_list: vec![],
tauri_streams: HashMap::new(),
};
//log_debug!("{:?}", &local_broker);
let broker = Arc::new(RwLock::new(local_broker));
BROKER.write().await.set_local_broker(Arc::clone(
&(Arc::clone(&broker) as Arc<RwLock<dyn ILocalBroker>>),
));
BROKER
.write()
.await
.set_local_broker(Arc::clone(&broker) as Arc<RwLock<dyn ILocalBroker>>);
Ok(broker)
}
@ -868,6 +927,27 @@ pub async fn init_local_broker_with_lazy(config_fn: &Lazy<Box<ConfigInitFn>>) {
.await;
}
#[doc(hidden)]
pub async fn tauri_stream_add(stream_id: String, cancel: CancelFn) -> Result<(), NgError> {
let mut broker = match LOCAL_BROKER.get() {
None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
Some(Ok(broker)) => broker.write().await,
};
broker.tauri_stream_add(stream_id, cancel);
Ok(())
}
#[doc(hidden)]
pub async fn tauri_stream_cancel(stream_id: String) -> Result<(), NgError> {
let mut broker = match LOCAL_BROKER.get() {
None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
Some(Ok(broker)) => broker.write().await,
};
broker.tauri_stream_cancel(stream_id);
Ok(())
}
/// Initialize the configuration of your local broker
///
/// by passing in a function (or closure) that returns a `LocalBrokerConfig`.
@ -1415,11 +1495,64 @@ pub async fn wallet_remove(wallet_name: String) -> Result<(), NgError> {
Ok(())
}
/// fetches a document's content, or performs a mutation on the document.
pub async fn doc_fetch(
// /// fetches a document's content.
// pub async fn doc_fetch_nuri(
// session_id: u64,
// nuri: String,
// payload: Option<AppRequestPayload>,
// ) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
// let mut broker = match LOCAL_BROKER.get() {
// None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
// Some(Ok(broker)) => broker.write().await,
// };
// if session_id as usize >= broker.opened_sessions_list.len() {
// return Err(NgError::InvalidArgument);
// }
// let session = broker.opened_sessions_list[session_id as usize]
// .as_mut()
// .ok_or(NgError::SessionNotFound)?;
// session.verifier.doc_fetch_nuri(nuri, payload, true).await
// }
// /// fetches the private store home page and subscribes to its updates.
// pub async fn doc_fetch_private(
// session_id: u64,
// ) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
// let mut broker = match LOCAL_BROKER.get() {
// None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
// Some(Ok(broker)) => broker.write().await,
// };
// if session_id as usize >= broker.opened_sessions_list.len() {
// return Err(NgError::InvalidArgument);
// }
// let session = broker.opened_sessions_list[session_id as usize]
// .as_mut()
// .ok_or(NgError::SessionNotFound)?;
// session.verifier.doc_fetch_private(true).await
// }
/// process any type of app request that returns a single value
pub async fn app_request(session_id: u64, request: AppRequest) -> Result<AppResponse, NgError> {
let mut broker = match LOCAL_BROKER.get() {
None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
Some(Ok(broker)) => broker.write().await,
};
if session_id as usize >= broker.opened_sessions_list.len() {
return Err(NgError::InvalidArgument);
}
let session = broker.opened_sessions_list[session_id as usize]
.as_mut()
.ok_or(NgError::SessionNotFound)?;
session.verifier.app_request(request).await
}
/// process any type of app request that returns a stream of values
pub async fn app_request_stream(
session_id: u64,
nuri: String,
payload: Option<AppRequestPayload>,
request: AppRequest,
) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
let mut broker = match LOCAL_BROKER.get() {
None | Some(Err(_)) => return Err(NgError::LocalBrokerNotInitialized),
@ -1432,7 +1565,7 @@ pub async fn doc_fetch(
.as_mut()
.ok_or(NgError::SessionNotFound)?;
session.verifier.doc_fetch(nuri, payload)
session.verifier.app_request_stream(request).await
}
/// retrieves the ID of one of the 3 stores of the personal Site (3P: public, protected, or private)
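
For orientation, a minimal sketch (not part of the commit) of how a caller drives the two new entry points, mirroring what the examples and the Tauri layer do below. The request construction is copied from doc_fetch_private_subscribe in ng-app/src-tauri/src/lib.rs; the exact import paths are assumptions.

use async_std::stream::StreamExt;
use nextgraph::local_broker::app_request_stream;
use nextgraph::verifier::types::*; // AppRequest, AppRequestV0, NuriV0, ... (path as used in lib.rs)
use ng_repo::errors::NgError;

async fn watch_private_store(session_id: u64) -> Result<(), NgError> {
    let request = AppRequest::V0(AppRequestV0 {
        command: AppRequestCommandV0::Fetch(AppFetchContentV0::get_or_subscribe(true)),
        nuri: NuriV0::new_private_store_target(),
        payload: None,
    });
    // returns the response stream plus a CancelFn closure
    let (mut responses, cancel) = app_request_stream(session_id, request).await?;
    while let Some(response) = responses.next().await {
        // each AppResponse carries a commit, file metadata, or a binary chunk
        let _ = response;
    }
    cancel(); // stops the stream and unregisters the subscription
    Ok(())
}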

@ -29,6 +29,7 @@ tauri = { version = "2.0.0-alpha.14", features = [] }
# tauri = { git = "https://github.com/simonhyll/tauri.git", branch="fix/ipc-mixup", features = [] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_bytes = "0.11.7"
ng-repo = { path = "../../ng-repo" }
ng-net = { path = "../../ng-net" }
ng-client-ws = { path = "../../ng-client-ws" }

@ -8,6 +8,7 @@
// according to those terms.
use async_std::stream::StreamExt;
use nextgraph::local_broker::*;
use nextgraph::verifier::types::*;
use ng_net::broker::*;
use ng_net::types::{ClientInfo, CreateAccountBSP, Invitation};
use ng_net::utils::{decode_invitation_string, spawn_and_log_error, Receiver, ResultSend};
@ -234,29 +235,39 @@ async fn decode_invitation(invite: String) -> Option<Invitation> {
}
#[tauri::command(rename_all = "snake_case")]
async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) -> Result<(), ()> {
log_debug!("doc_sync_branch {} {}", nuri, stream_id);
async fn app_request_stream(
session_id: u64,
request: AppRequest,
stream_id: &str,
app: tauri::AppHandle,
) -> Result<(), String> {
log_debug!("app request stream {} {:?}", stream_id, request);
let main_window = app.get_window("main").unwrap();
let mut reader;
let reader;
{
let mut sender;
let mut broker = BROKER.write().await;
(reader, sender) = broker.doc_sync_branch(nuri.to_string().clone()).await;
broker.tauri_stream_add(stream_id.to_string(), sender);
let cancel;
(reader, cancel) = nextgraph::local_broker::app_request_stream(session_id, request)
.await
.map_err(|e| e.to_string())?;
nextgraph::local_broker::tauri_stream_add(stream_id.to_string(), cancel)
.await
.map_err(|e| e.to_string())?;
}
async fn inner_task(
mut reader: Receiver<Commit>,
mut reader: Receiver<AppResponse>,
stream_id: String,
main_window: tauri::Window,
) -> ResultSend<()> {
while let Some(commit) = reader.next().await {
main_window.emit(&stream_id, commit).unwrap();
while let Some(app_response) = reader.next().await {
main_window.emit(&stream_id, app_response).unwrap();
}
BROKER.write().await.tauri_stream_cancel(stream_id);
nextgraph::local_broker::tauri_stream_cancel(stream_id)
.await
.map_err(|e| e.to_string())?;
log_debug!("END OF LOOP");
Ok(())
@ -268,13 +279,59 @@ async fn doc_sync_branch(nuri: &str, stream_id: &str, app: tauri::AppHandle) ->
}
#[tauri::command(rename_all = "snake_case")]
async fn cancel_doc_sync_branch(stream_id: &str) -> Result<(), ()> {
log_debug!("cancel stream {}", stream_id);
BROKER
.write()
async fn doc_fetch_private_subscribe() -> Result<AppRequest, String> {
let request = AppRequest::V0(AppRequestV0 {
command: AppRequestCommandV0::Fetch(AppFetchContentV0::get_or_subscribe(true)),
nuri: NuriV0::new_private_store_target(),
payload: None,
});
Ok(request)
}
#[tauri::command(rename_all = "snake_case")]
async fn app_request(
session_id: u64,
request: AppRequest,
app: tauri::AppHandle,
) -> Result<AppResponse, String> {
log_debug!("app request {:?}", request);
nextgraph::local_broker::app_request(session_id, request)
.await
.tauri_stream_cancel(stream_id.to_string());
Ok(())
.map_err(|e| e.to_string())
}
#[tauri::command(rename_all = "snake_case")]
async fn upload_chunk(
session_id: u64,
upload_id: u32,
chunk: serde_bytes::ByteBuf,
nuri: NuriV0,
app: tauri::AppHandle,
) -> Result<AppResponse, String> {
log_debug!("upload_chunk {:?}", chunk);
let request = AppRequest::V0(AppRequestV0 {
command: AppRequestCommandV0::FilePut,
nuri,
payload: Some(AppRequestPayload::V0(
AppRequestPayloadV0::RandomAccessFilePutChunk((upload_id, chunk)),
)),
});
nextgraph::local_broker::app_request(session_id, request)
.await
.map_err(|e| e.to_string())
}
#[tauri::command(rename_all = "snake_case")]
async fn cancel_stream(stream_id: &str) -> Result<(), String> {
log_debug!("cancel stream {}", stream_id);
Ok(
nextgraph::local_broker::tauri_stream_cancel(stream_id.to_string())
.await
.map_err(|e: NgError| e.to_string())?,
)
}
#[tauri::command(rename_all = "snake_case")]
@ -304,32 +361,6 @@ async fn disconnections_subscribe(app: tauri::AppHandle) -> Result<(), ()> {
Ok(())
}
#[tauri::command(rename_all = "snake_case")]
async fn doc_get_file_from_store_with_object_ref(
nuri: &str,
obj_ref: ObjectRef,
) -> Result<ObjectContent, String> {
log_debug!(
"doc_get_file_from_store_with_object_ref {} {:?}",
nuri,
obj_ref
);
// let ret = ObjectContent::File(File::V0(FileV0 {
// content_type: "text/plain".to_string(),
// metadata: vec![],
// content: vec![45; 20],
// }));
// Ok(ret)
let obj_content = BROKER
.write()
.await
.get_object_from_store_with_object_ref(nuri.to_string(), obj_ref)
.await
.map_err(|e| e.to_string())?;
Ok(obj_content)
}
#[tauri::command(rename_all = "snake_case")]
async fn session_stop(user_id: UserId) -> Result<(), String> {
nextgraph::local_broker::session_stop(&user_id)
@ -438,9 +469,6 @@ impl AppBuilder {
.plugin(tauri_plugin_window::init())
.invoke_handler(tauri::generate_handler![
test,
doc_sync_branch,
cancel_doc_sync_branch,
doc_get_file_from_store_with_object_ref,
wallet_gen_shuffle_for_pazzle_opening,
wallet_gen_shuffle_for_pin,
wallet_open_with_pazzle,
@ -461,6 +489,11 @@ impl AppBuilder {
user_connect,
user_disconnect,
client_info_rust,
doc_fetch_private_subscribe,
cancel_stream,
app_request_stream,
app_request,
upload_chunk,
])
.run(tauri::generate_context!())
.expect("error while running tauri application");
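
The streaming command above follows a simple bridge pattern: the webview picks a stream_id and listens for Tauri window events under that name, the Rust side registers the stream's CancelFn under the same id, pumps every AppResponse out as a window event, and cancel_stream later invokes the stored CancelFn. A stripped-down sketch of that bridge, where open_stream is a hypothetical stand-in for app_request_stream (the tauri_stream_add registry call is the one added in this commit):

use async_std::stream::StreamExt;
use tauri::Manager;

#[tauri::command(rename_all = "snake_case")]
async fn start_stream(stream_id: String, app: tauri::AppHandle) -> Result<(), String> {
    let main_window = app.get_window("main").unwrap();
    // hypothetical source returning (Receiver<AppResponse>, CancelFn)
    let (mut reader, cancel) = open_stream().await.map_err(|e| e.to_string())?;
    // keep the CancelFn so a later cancel_stream(stream_id) can stop the pump
    nextgraph::local_broker::tauri_stream_add(stream_id.clone(), cancel)
        .await
        .map_err(|e| e.to_string())?;
    while let Some(app_response) = reader.next().await {
        // each item becomes a window event; JS listens via getCurrent().listen(stream_id, ...)
        main_window.emit(&stream_id, app_response).unwrap();
    }
    // when the stream ends, the real command also calls tauri_stream_cancel(stream_id)
    Ok(())
}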

@ -13,7 +13,6 @@ import {version} from '../package.json';
const mapping = {
"doc_get_file_from_store_with_object_ref": [ "nuri","obj_ref" ],
"wallet_gen_shuffle_for_pazzle_opening": ["pazzle_length"],
"wallet_gen_shuffle_for_pin": [],
"wallet_open_with_pazzle": ["wallet","pazzle","pin"],
@ -32,7 +31,9 @@ const mapping = {
"decode_invitation": ["invite"],
"user_connect": ["info","user_id","location"],
"user_disconnect": ["user_id"],
"test": [ ]
"app_request": ["session_id","request"],
"test": [ ],
"doc_fetch_private_subscribe": []
}
@ -113,29 +114,25 @@ const handler = {
e[1].since = new Date(e[1].since);
}
return ret;
}else if (path[0] === "doc_sync_branch") {
}
else if (path[0] === "app_request_stream") {
let stream_id = (lastStreamId += 1).toString();
console.log("stream_id",stream_id);
let { getCurrent } = await import("@tauri-apps/plugin-window");
let nuri = args[0];
let callback = args[1];
let session_id = args[0];
let request = args[1];
let callback = args[2];
let unlisten = await getCurrent().listen(stream_id, (event) => {
callback(event.payload).then(()=> {})
})
await tauri.invoke("doc_sync_branch",{nuri, stream_id});
await tauri.invoke("app_request_stream",{session_id, stream_id, request});
return () => {
unlisten();
tauri.invoke("cancel_doc_sync_branch", {stream_id});
tauri.invoke("cancel_stream", {stream_id});
}
} else if (path[0] === "doc_get_file_from_store_with_object_ref") {
let arg = {};
args.map((el,ix) => arg[mapping[path[0]][ix]]=el)
let res = await tauri.invoke(path[0],arg);
res['File'].V0.content = Uint8Array.from(res['File'].V0.content);
res['File'].V0.metadata = Uint8Array.from(res['File'].V0.metadata);
return res;
} else if (path[0] === "get_wallets") {
let res = await tauri.invoke(path[0],{});
if (res) for (let e of Object.entries(res)) {
@ -143,6 +140,13 @@ const handler = {
}
return res || {};
} else if (path[0] === "upload_chunk") {
let session_id = args[0];
let upload_id = args[1];
let chunk = args[2];
let nuri = args[3];
chunk = Array.from(new Uint8Array(chunk));
return await tauri.invoke(path[0],{session_id, upload_id, chunk, nuri})
} else if (path[0] === "wallet_create") {
let params = args[0];
params.result_with_wallet_file = false;

@ -12,6 +12,7 @@
<script lang="ts">
import { online } from "../store";
import FullLayout from "./FullLayout.svelte";
import Test from "./Test.svelte";
import { PaperAirplane, Bell, ArrowRightOnRectangle } from "svelte-heros-v2";
// @ts-ignore
import Logo from "../assets/nextgraph.svg?component";
@ -76,5 +77,7 @@
</nav>
{/if}
<div />
<Test />
</FullLayout>
<svelte:window bind:innerWidth={width} />

@ -11,75 +11,188 @@
<script lang="ts">
import ng from "../api";
import branch_commits from "../store";
let name = "";
let greetMsg = "";
let commits = branch_commits("ok", false);
import { branch_subs, active_session } from "../store";
import { link } from "svelte-spa-router";
let files = branch_subs("ok");
let img_map = {};
async function get_img(ref) {
if (!ref) return false;
let cache = img_map[ref];
let cache = img_map[ref.nuri];
if (cache) {
return cache;
}
try {
//console.log(JSON.stringify(ref));
let file = await ng.doc_get_file_from_store_with_object_ref("ng:", ref);
//console.log(file);
var blob = new Blob([file["File"].V0.content], {
type: file["File"].V0.content_type,
});
var imageUrl = URL.createObjectURL(blob);
img_map[ref] = imageUrl;
return imageUrl;
} catch (e) {
console.error(e);
return false;
}
}
let prom = new Promise(async (resolve) => {
try {
let nuri = {
target: "PrivateStore",
entire_store: false,
access: [{ Key: ref.reference.key }],
locator: [],
object: ref.reference.id,
};
async function greet() {
//greetMsg = await ng.create_wallet(name);
// cancel = await ng.doc_sync_branch("ok", async (commit) => {
// console.log(commit);
// try {
// let file = await ng.doc_get_file_from_store_with_object_ref(
// "ng:",
// commit.V0.content.refs[0]
// );
// console.log(file);
// var blob = new Blob([file["File"].V0.content], {
// type: file["File"].V0.content_type,
// });
// var imageUrl = URL.createObjectURL(blob);
// url = imageUrl;
// } catch (e) {
// console.error(e);
// }
// });
//cancel();
let file_request = {
V0: {
command: "FileGet",
nuri,
},
};
let final_blob;
let content_type;
let unsub = await ng.app_request_stream(
$active_session.session_id,
file_request,
async (blob) => {
//console.log("GOT APP RESPONSE", blob);
if (blob.V0.FileMeta) {
content_type = blob.V0.FileMeta.content_type;
final_blob = new Blob([], { type: content_type });
} else if (blob.V0.FileBinary) {
if (blob.V0.FileBinary.byteLength > 0) {
final_blob = new Blob([final_blob, blob.V0.FileBinary], {
type: content_type,
});
} else {
var imageUrl = URL.createObjectURL(final_blob);
resolve(imageUrl);
}
}
}
);
} catch (e) {
console.error(e);
resolve(false);
}
});
img_map[ref.nuri] = prom;
return prom;
}
let fileinput;
const onFileSelected = (e) => {
function uploadFile(upload_id, nuri, file, success) {
let chunkSize = 1024 * 1024;
let fileSize = file.size;
let offset = 0;
let readBlock = null;
let onLoadHandler = async function (event) {
let result = event.target.result;
if (event.target.error == null) {
offset += event.target.result.byteLength;
//console.log("chunk", event.target.result);
let res = await ng.upload_chunk(
$active_session.session_id,
upload_id,
event.target.result,
nuri
);
//console.log("chunk upload res", res);
// if (onChunkRead) {
// onChunkRead(result);
// }
} else {
// if (onChunkError) {
// onChunkError(event.target.error);
// }
return;
}
if (offset >= fileSize) {
//console.log("file uploaded");
let res = await ng.upload_chunk(
$active_session.session_id,
upload_id,
[],
nuri
);
//console.log("end upload res", res);
if (success) {
success(res);
}
return;
}
readBlock(offset, chunkSize, file);
};
readBlock = function (offset, length, file) {
let fileReader = new FileReader();
let blob = file.slice(offset, length + offset);
fileReader.onload = onLoadHandler;
fileReader.readAsArrayBuffer(blob);
};
readBlock(offset, chunkSize, file);
return;
}
const onFileSelected = async (e) => {
let image = e.target.files[0];
let reader = new FileReader();
reader.readAsArrayBuffer(image);
reader.onload = (e) => {
console.log(e.target.result);
if (!image) return;
//console.log(image);
let nuri = {
target: "PrivateStore",
entire_store: false,
access: [],
locator: [],
};
let start_request = {
V0: {
command: "FilePut",
nuri,
payload: {
V0: {
RandomAccessFilePut: image.type,
},
},
},
};
let start_res = await ng.app_request(
$active_session.session_id,
start_request
);
let upload_id = start_res.V0.FileUploading;
uploadFile(upload_id, nuri, image, async (reference) => {
if (reference) {
let request = {
V0: {
command: "FilePut",
nuri,
payload: {
V0: {
AddFile: {
filename: image.name,
object: reference.V0.FileUploaded,
},
},
},
},
};
await ng.app_request($active_session.session_id, request);
}
});
fileinput.value = "";
};
</script>
<div>
<!-- <div class="row">
<input id="greet-input" placeholder="Enter a name..." bind:value={name} />
<button on:click={greet}> Greet </button>
</div> -->
<div class="row mt-2">
<!-- <a use:link href="/">
<button tabindex="-1" class=" mr-5 select-none"> Back home </button>
</a> -->
<button
type="button"
on:click={() => {
@ -112,13 +225,15 @@
bind:this={fileinput}
/>
</div>
<!-- <p>{greetMsg}</p> -->
{#await commits.load()}
{#await files.load()}
<p>Currently loading...</p>
{:then}
{#each $commits as commit}
{#each $files as file}
<p>
{#await get_img(commit.V0.header.V0.files[0]) then url}
{file.V0.File.name}<br />
did:ng{file.V0.File.nuri}
{#await get_img(file.V0.File) then url}
{#if url}
<img src={url} />
{/if}
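
What this component implements is a three-phase upload protocol over app_request: a FilePut with RandomAccessFilePut(content_type) opens the upload and answers FileUploading(upload_id); each 1 MiB slice is sent as RandomAccessFilePutChunk((upload_id, bytes)); an empty chunk finalizes the file and answers FileUploaded(reference); a last AddFile payload records the reference under its filename. A hedged Rust sketch of the same sequence (payload shapes are taken from this commit's upload_chunk command; the AppResponseV0 variant names are inferred from the JSON the JS side reads, and DocAddFile is a hypothetical struct name):

use nextgraph::local_broker::app_request;
use nextgraph::verifier::types::*;
use ng_repo::errors::NgError;

async fn upload_file(session_id: u64, nuri: NuriV0, filename: String,
                     content_type: String, chunks: Vec<Vec<u8>>) -> Result<(), NgError> {
    // phase 1: open the upload and obtain an upload_id
    let start = AppRequest::V0(AppRequestV0 {
        command: AppRequestCommandV0::FilePut,
        nuri: nuri.clone(),
        payload: Some(AppRequestPayload::V0(
            AppRequestPayloadV0::RandomAccessFilePut(content_type),
        )),
    });
    let AppResponse::V0(AppResponseV0::FileUploading(upload_id)) =
        app_request(session_id, start).await?
    else {
        return Err(NgError::InvalidArgument); // placeholder error for the sketch
    };
    // phase 2: stream the chunks; a final empty chunk closes the file
    for chunk in chunks.into_iter().chain(std::iter::once(vec![])) {
        let put = AppRequest::V0(AppRequestV0 {
            command: AppRequestCommandV0::FilePut,
            nuri: nuri.clone(),
            payload: Some(AppRequestPayload::V0(
                AppRequestPayloadV0::RandomAccessFilePutChunk((
                    upload_id,
                    serde_bytes::ByteBuf::from(chunk),
                )),
            )),
        });
        // phase 3: the reply to the empty chunk carries the file reference
        if let AppResponse::V0(AppResponseV0::FileUploaded(reference)) =
            app_request(session_id, put).await?
        {
            let add = AppRequest::V0(AppRequestV0 {
                command: AppRequestCommandV0::FilePut,
                nuri: nuri.clone(),
                payload: Some(AppRequestPayload::V0(AppRequestPayloadV0::AddFile(
                    DocAddFile { filename: filename.clone(), object: reference }, // hypothetical struct
                ))),
            });
            app_request(session_id, add).await?;
        }
    }
    Ok(())
}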

@ -12,11 +12,27 @@
<script lang="ts">
import Test from "../lib/Test.svelte";
export let params = {};
import { active_session } from "../store";
import { onMount, tick } from "svelte";
import { link, push } from "svelte-spa-router";
let top;
async function scrollToTop() {
await tick();
top.scrollIntoView();
}
onMount(async () => {
if (!$active_session) {
push("#/");
} else {
await scrollToTop();
}
});
</script>
<main class="container3">
<h1>Welcome to test</h1>
<div class="row">
<div class="row" bind:this={top}>
<Test />
</div>
</main>

@ -127,7 +127,7 @@ can_connect.subscribe(async (value) => {
}
});
const branch_commits = (nura, sub) => {
export const branch_subs = function(nura) {
// console.log("branch_commits")
// const { subscribe, set, update } = writable([]); // create the underlying writable store
@ -162,8 +162,10 @@ const branch_commits = (nura, sub) => {
let already_subscribed = all_branches[nura];
if (!already_subscribed) return;
if (already_subscribed.load) {
await already_subscribed.load();
let loader = already_subscribed.load;
already_subscribed.load = undefined;
await loader();
}
},
subscribe: (run, invalid) => {
@ -175,10 +177,22 @@ const branch_commits = (nura, sub) => {
let unsub = () => {};
already_subscribed = {
load: async () => {
unsub = await ng.doc_sync_branch(nura, async (commit) => {
console.log("GOT COMMIT", commit);
update( (old) => {old.unshift(commit); return old;} )
});
try {
let session = get(active_session);
if (!session) {
console.error("no session");
return;
}
await unsub();
unsub = await ng.app_request_stream(session.session_id, await ng.doc_fetch_private_subscribe(),
async (commit) => {
//console.log("GOT APP RESPONSE", commit);
update( (old) => {old.unshift(commit); return old;} )
});
}
catch (e) {
console.error(e);
}
// this is in case decrease has been called before the load function returned.
if (count == 0) {unsub();}
},
@ -186,10 +200,10 @@ const branch_commits = (nura, sub) => {
count += 1;
return readonly({subscribe});
},
decrease: () => {
decrease: async () => {
count -= 1;
if (count == 0) {
unsub();
await unsub();
delete all_branches[nura];
}
},
@ -199,13 +213,13 @@ const branch_commits = (nura, sub) => {
let new_store = already_subscribed.increase();
let read_unsub = new_store.subscribe(run, invalid);
return () => {
return async () => {
read_unsub();
already_subscribed.decrease();
await already_subscribed.decrease();
}
}
}
};
export default branch_commits;
//export default branch_commits;

@ -455,7 +455,7 @@ impl RocksDbServerStorage {
) -> Result<TopicSubRes, ServerError> {
let overlay = self.check_overlay(overlay)?;
// now we check that the repo was previously pinned.
// if it was opened but not pinned, then this should be deal with in the ServerBroker, in memory, not here)
// if it was opened but not pinned, then this should be dealt with in the ServerBroker (in memory), not here
let is_publisher = publisher.is_some();
// (we already checked that the advert is valid)
@ -502,7 +502,45 @@ impl RocksDbServerStorage {
Ok(blocks)
}
fn add_block(
pub(crate) fn has_block(
&self,
overlay: &OverlayId,
block_id: &BlockId,
) -> Result<(), ServerError> {
let overlay = self.check_overlay(overlay)?;
let overlay = &overlay;
Ok(self.block_storage.read().unwrap().has(overlay, block_id)?)
}
pub(crate) fn get_block(
&self,
overlay: &OverlayId,
block_id: &BlockId,
) -> Result<Block, ServerError> {
let overlay = self.check_overlay(overlay)?;
let overlay = &overlay;
Ok(self.block_storage.read().unwrap().get(overlay, block_id)?)
}
pub(crate) fn add_block(
&self,
overlay: &OverlayId,
block: Block,
) -> Result<BlockId, ServerError> {
if overlay.is_outer() {
// we don't publish events on the outer overlay!
return Err(ServerError::OverlayMismatch);
}
let overlay = self.check_overlay(overlay)?;
let overlay = &overlay;
let mut overlay_storage = OverlayStorage::new(overlay, &self.core_storage);
Ok(self.add_block_(overlay, &mut overlay_storage, block)?)
}
fn add_block_(
&self,
overlay_id: &OverlayId,
overlay_storage: &mut OverlayStorage,
@ -522,7 +560,7 @@ impl RocksDbServerStorage {
overlay: &OverlayId,
mut event: Event,
user_id: &UserId,
) -> Result<(), ServerError> {
) -> Result<TopicId, ServerError> {
if overlay.is_outer() {
// we don't publish events on the outer overlay!
return Err(ServerError::OverlayMismatch);
@ -532,12 +570,13 @@ impl RocksDbServerStorage {
// TODO: check that the sequence number is correct
let topic = *event.topic_id();
// check that the topic exists and that this user has pinned it as publisher
let mut topic_storage = TopicStorage::open(event.topic_id(), overlay, &self.core_storage)
.map_err(|e| match e {
StorageError::NotFound => ServerError::TopicNotFound,
_ => e.into(),
})?;
let mut topic_storage =
TopicStorage::open(&topic, overlay, &self.core_storage).map_err(|e| match e {
StorageError::NotFound => ServerError::TopicNotFound,
_ => e.into(),
})?;
let is_publisher = TopicStorage::USERS
.get(&mut topic_storage, user_id)
.map_err(|e| match e {
@ -557,7 +596,7 @@ impl RocksDbServerStorage {
let temp_mini_block_storage = HashMapBlockStorage::new();
for block in v0.content.blocks {
let _ = temp_mini_block_storage.put(overlay, &block, false)?;
extracted_blocks_ids.push(self.add_block(
extracted_blocks_ids.push(self.add_block_(
overlay,
&mut overlay_storage,
block,
@ -604,7 +643,7 @@ impl RocksDbServerStorage {
}
}
Ok(())
Ok(topic)
}
pub(crate) fn topic_sync_req(

@ -21,6 +21,8 @@ use ng_repo::{
};
use serde::{Deserialize, Serialize};
use ng_repo::log::*;
use crate::rocksdb_server_storage::RocksDbServerStorage;
pub struct TopicInfo {
@ -113,6 +115,8 @@ pub struct ServerBroker {
overlays: HashMap<OverlayId, OverlayInfo>,
inner_overlays: HashMap<OverlayId, Option<OverlayId>>,
local_subscriptions: HashMap<(OverlayId, TopicId), HashSet<PubKey>>,
}
impl ServerBroker {
@ -121,22 +125,77 @@ impl ServerBroker {
storage: storage,
overlays: HashMap::new(),
inner_overlays: HashMap::new(),
local_subscriptions: HashMap::new(),
}
}
pub fn load(&mut self) -> Result<(), NgError> {
Ok(())
}
fn add_subscription(
&mut self,
overlay: OverlayId,
topic: TopicId,
peer: PubKey,
) -> Result<(), ServerError> {
let peers_set = self
.local_subscriptions
.entry((overlay, topic))
.or_insert(HashSet::with_capacity(1));
log_debug!(
"SUBSCRIBING PEER {} TOPIC {} OVERLAY {}",
peer,
topic,
overlay
);
if !peers_set.insert(peer) {
//return Err(ServerError::PeerAlreadySubscribed);
}
Ok(())
}
fn remove_subscription(
&mut self,
overlay: &OverlayId,
topic: &TopicId,
peer: &PubKey,
) -> Result<(), ServerError> {
let peers_set = self
.local_subscriptions
.get_mut(&(*overlay, *topic))
.ok_or(ServerError::SubscriptionNotFound)?;
if !peers_set.remove(peer) {
return Err(ServerError::SubscriptionNotFound);
}
Ok(())
}
}
//TODO: the purpose of this trait is to have a level of indirection so we can keep some data in memory (as a cache) and avoid hitting the storage backend (rocksdb) on every call.
//for now this cache is not implemented, but the structs are ready (see above), and it would only require slightly changing the implementation of the trait functions below.
impl IServerBroker for ServerBroker {
fn has_block(&self, overlay_id: &OverlayId, block_id: &BlockId) -> Result<(), ServerError> {
self.storage.has_block(overlay_id, block_id)
}
fn get_block(&self, overlay_id: &OverlayId, block_id: &BlockId) -> Result<Block, ServerError> {
self.storage.get_block(overlay_id, block_id)
}
fn next_seq_for_peer(&self, peer: &PeerId, seq: u64) -> Result<(), ServerError> {
self.storage.next_seq_for_peer(peer, seq)
}
fn put_block(&self, overlay_id: &OverlayId, block: Block) -> Result<(), ServerError> {
self.storage.add_block(overlay_id, block)?;
Ok(())
}
fn get_user(&self, user_id: PubKey) -> Result<bool, ProtocolError> {
self.storage.get_user(user_id)
}
@ -181,7 +240,7 @@ impl IServerBroker for ServerBroker {
}
fn pin_repo_write(
&self,
&mut self,
overlay: &OverlayAccess,
repo: &RepoHash,
user_id: &UserId,
@ -189,8 +248,9 @@ impl IServerBroker for ServerBroker {
rw_topics: &Vec<PublisherAdvert>,
overlay_root_topic: &Option<TopicId>,
expose_outer: bool,
peer: &PubKey,
) -> Result<RepoOpened, ServerError> {
self.storage.pin_repo_write(
let res = self.storage.pin_repo_write(
overlay,
repo,
user_id,
@ -198,30 +258,50 @@ impl IServerBroker for ServerBroker {
rw_topics,
overlay_root_topic,
expose_outer,
)
)?;
for topic in res.iter() {
self.add_subscription(
*overlay.overlay_id_for_client_protocol_purpose(),
*topic.topic_id(),
*peer,
)?;
}
Ok(res)
}
fn pin_repo_read(
&self,
&mut self,
overlay: &OverlayId,
repo: &RepoHash,
user_id: &UserId,
ro_topics: &Vec<TopicId>,
peer: &PubKey,
) -> Result<RepoOpened, ServerError> {
self.storage
.pin_repo_read(overlay, repo, user_id, ro_topics)
let res = self
.storage
.pin_repo_read(overlay, repo, user_id, ro_topics)?;
for topic in res.iter() {
// TODO: those outer subscriptions are not handled yet. they will not emit events.
self.add_subscription(*overlay, *topic.topic_id(), *peer)?;
}
Ok(res)
}
fn topic_sub(
&self,
&mut self,
overlay: &OverlayId,
repo: &RepoHash,
topic: &TopicId,
user: &UserId,
publisher: Option<&PublisherAdvert>,
peer: &PubKey,
) -> Result<TopicSubRes, ServerError> {
self.storage
.topic_sub(overlay, repo, topic, user, publisher)
let res = self
.storage
.topic_sub(overlay, repo, topic, user, publisher)?;
self.add_subscription(*overlay, *topic, *peer)?;
Ok(res)
}
fn get_commit(&self, overlay: &OverlayId, id: &ObjectId) -> Result<Vec<Block>, ServerError> {
@ -233,8 +313,25 @@ impl IServerBroker for ServerBroker {
overlay: &OverlayId,
event: Event,
user_id: &UserId,
) -> Result<(), ServerError> {
self.storage.save_event(overlay, event, user_id)
remote_peer: &PubKey,
) -> Result<HashSet<&PubKey>, ServerError> {
let topic = self.storage.save_event(overlay, event, user_id)?;
log_debug!(
"DISPATCH EVENt {} {} {:?}",
overlay,
topic,
self.local_subscriptions
);
let mut set = self
.local_subscriptions
.get(&(*overlay, topic))
.map(|set| set.iter().collect())
.unwrap_or(HashSet::new());
set.remove(remote_peer);
Ok(set)
}
fn topic_sync_req(
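
In short: local_subscriptions is an in-memory index from (overlay, topic) to the peers currently subscribed on this broker, and save_event now returns the TopicId precisely so that dispatch_event can look that set up and hand back every subscriber except the publishing peer. A self-contained toy of the set logic (plain std types standing in for OverlayId/TopicId/PubKey):

use std::collections::{HashMap, HashSet};

fn main() {
    // (overlay, topic) -> peers subscribed on this broker
    let mut subs: HashMap<(u8, u8), HashSet<&str>> = HashMap::new();
    subs.entry((0, 1)).or_default().extend(["alice_peer", "bob_peer"]);

    // dispatch rule: everyone subscribed to the event's topic,
    // minus the peer the event came from
    let publisher = "alice_peer";
    let mut targets = subs.get(&(0, 1)).cloned().unwrap_or_default();
    targets.remove(publisher);
    assert_eq!(targets, HashSet::from(["bob_peer"]));
}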

@ -36,6 +36,7 @@ either = "1.8.1"
url = "2.4.0"
base64-url = "2.0.0"
web-time = "0.2.0"
async-recursion = "1.1.1"
[target.'cfg(target_arch = "wasm32")'.dependencies]
reqwest = { version = "0.11.18", features = ["json","native-tls-vendored"] }

@ -0,0 +1,102 @@
/*
* Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use crate::broker::{ServerConfig, BROKER};
use crate::connection::NoiseFSM;
use crate::types::*;
use crate::{actor::*, types::ProtocolMessage};
use async_std::sync::Mutex;
use ng_repo::errors::*;
use ng_repo::log::*;
use ng_repo::types::PubKey;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
impl BlocksExist {
pub fn get_actor(&self, id: i64) -> Box<dyn EActor> {
Actor::<BlocksExist, BlocksFound>::new_responder(id)
}
}
impl TryFrom<ProtocolMessage> for BlocksExist {
type Error = ProtocolError;
fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
let req: ClientRequestContentV0 = msg.try_into()?;
if let ClientRequestContentV0::BlocksExist(a) = req {
Ok(a)
} else {
log_debug!("INVALID {:?}", req);
Err(ProtocolError::InvalidValue)
}
}
}
impl From<BlocksExist> for ProtocolMessage {
fn from(msg: BlocksExist) -> ProtocolMessage {
let overlay = *msg.overlay();
ProtocolMessage::from_client_request_v0(ClientRequestContentV0::BlocksExist(msg), overlay)
}
}
impl TryFrom<ProtocolMessage> for BlocksFound {
type Error = ProtocolError;
fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
let res: ClientResponseContentV0 = msg.try_into()?;
if let ClientResponseContentV0::BlocksFound(a) = res {
Ok(a)
} else {
log_debug!("INVALID {:?}", res);
Err(ProtocolError::InvalidValue)
}
}
}
impl From<BlocksFound> for ProtocolMessage {
fn from(b: BlocksFound) -> ProtocolMessage {
ClientResponseContentV0::BlocksFound(b).into()
}
}
impl Actor<'_, BlocksExist, BlocksFound> {}
#[async_trait::async_trait]
impl EActor for Actor<'_, BlocksExist, BlocksFound> {
async fn respond(
&mut self,
msg: ProtocolMessage,
fsm: Arc<Mutex<NoiseFSM>>,
) -> Result<(), ProtocolError> {
let req = BlocksExist::try_from(msg)?;
let broker = BROKER.read().await;
let overlay = req.overlay().clone();
let mut found = vec![];
let mut missing = vec![];
match req {
BlocksExist::V0(v0) => {
for block_id in v0.blocks {
let r = broker.get_server_broker()?.has_block(&overlay, &block_id);
if r.is_err() {
missing.push(block_id);
} else {
found.push(block_id);
}
}
}
}
let res = Ok(BlocksFound::V0(BlocksFoundV0 { found, missing }));
fsm.lock()
.await
.send_in_reply_to(res.into(), self.id())
.await?;
Ok(())
}
}

@ -0,0 +1,129 @@
/*
* Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use crate::broker::{ServerConfig, BROKER};
use crate::connection::NoiseFSM;
use crate::server_broker::IServerBroker;
use crate::types::*;
use crate::{actor::*, types::ProtocolMessage};
use async_recursion::async_recursion;
use async_std::sync::{Mutex, MutexGuard};
use ng_repo::errors::*;
use ng_repo::log::*;
use ng_repo::types::{Block, BlockId, OverlayId, PubKey};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
impl BlocksGet {
pub fn get_actor(&self, id: i64) -> Box<dyn EActor> {
Actor::<BlocksGet, Block>::new_responder(id)
}
pub fn overlay(&self) -> &OverlayId {
match self {
Self::V0(v0) => v0.overlay.as_ref().unwrap(),
}
}
pub fn set_overlay(&mut self, overlay: OverlayId) {
match self {
Self::V0(v0) => v0.overlay = Some(overlay),
}
}
}
impl TryFrom<ProtocolMessage> for BlocksGet {
type Error = ProtocolError;
fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
let req: ClientRequestContentV0 = msg.try_into()?;
if let ClientRequestContentV0::BlocksGet(a) = req {
Ok(a)
} else {
log_debug!("INVALID {:?}", req);
Err(ProtocolError::InvalidValue)
}
}
}
impl From<BlocksGet> for ProtocolMessage {
fn from(msg: BlocksGet) -> ProtocolMessage {
let overlay = *msg.overlay();
ProtocolMessage::from_client_request_v0(ClientRequestContentV0::BlocksGet(msg), overlay)
}
}
impl Actor<'_, BlocksGet, Block> {}
#[async_trait::async_trait]
impl EActor for Actor<'_, BlocksGet, Block> {
async fn respond(
&mut self,
msg: ProtocolMessage,
fsm: Arc<Mutex<NoiseFSM>>,
) -> Result<(), ProtocolError> {
let req = BlocksGet::try_from(msg)?;
let broker = BROKER.read().await;
let server = broker.get_server_broker()?;
let mut lock = fsm.lock().await;
let mut something_was_sent = false;
#[async_recursion]
async fn process_children(
children: &Vec<BlockId>,
server: &Box<dyn IServerBroker + Send + Sync>,
overlay: &OverlayId,
lock: &mut MutexGuard<'_, NoiseFSM>,
req_id: i64,
include_children: bool,
something_was_sent: &mut bool,
) {
for block_id in children {
if let Ok(block) = server.get_block(overlay, block_id) {
let grand_children = block.children().to_vec();
if let Err(_) = lock.send_in_reply_to(block.into(), req_id).await {
break;
}
*something_was_sent = true;
if include_children {
process_children(
&grand_children,
server,
overlay,
lock,
req_id,
include_children,
something_was_sent,
)
.await;
}
}
}
}
process_children(
req.ids(),
server,
req.overlay(),
&mut lock,
self.id(),
req.include_children(),
&mut something_was_sent,
)
.await;
if !something_was_sent {
let re: Result<(), ServerError> = Err(ServerError::NotFound);
lock.send_in_reply_to(re.into(), self.id()).await?;
} else {
let re: Result<(), ServerError> = Err(ServerError::EndOfStream);
lock.send_in_reply_to(re.into(), self.id()).await?;
}
Ok(())
}
}

@ -0,0 +1,79 @@
/*
* Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use crate::broker::{ServerConfig, BROKER};
use crate::connection::NoiseFSM;
use crate::types::*;
use crate::{actor::*, types::ProtocolMessage};
use async_std::sync::Mutex;
use ng_repo::errors::*;
use ng_repo::log::*;
use ng_repo::types::PubKey;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
impl BlocksPut {
pub fn get_actor(&self, id: i64) -> Box<dyn EActor> {
Actor::<BlocksPut, ()>::new_responder(id)
}
}
impl TryFrom<ProtocolMessage> for BlocksPut {
type Error = ProtocolError;
fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
let req: ClientRequestContentV0 = msg.try_into()?;
if let ClientRequestContentV0::BlocksPut(a) = req {
Ok(a)
} else {
log_debug!("INVALID {:?}", req);
Err(ProtocolError::InvalidValue)
}
}
}
impl From<BlocksPut> for ProtocolMessage {
fn from(msg: BlocksPut) -> ProtocolMessage {
let overlay = *msg.overlay();
ProtocolMessage::from_client_request_v0(ClientRequestContentV0::BlocksPut(msg), overlay)
}
}
impl Actor<'_, BlocksPut, ()> {}
#[async_trait::async_trait]
impl EActor for Actor<'_, BlocksPut, ()> {
async fn respond(
&mut self,
msg: ProtocolMessage,
fsm: Arc<Mutex<NoiseFSM>>,
) -> Result<(), ProtocolError> {
let req = BlocksPut::try_from(msg)?;
let broker = BROKER.read().await;
let mut res: Result<(), ServerError> = Ok(());
let overlay = req.overlay().clone();
match req {
BlocksPut::V0(v0) => {
for block in v0.blocks {
let r = broker.get_server_broker()?.put_block(&overlay, block);
if r.is_err() {
res = r;
break;
}
}
}
}
fsm.lock()
.await
.send_in_reply_to(res.into(), self.id())
.await?;
Ok(())
}
}
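
Taken together, the three new client actors form the block-transfer protocol: BlocksExist asks which of a list of block IDs the broker already stores (so uploads can be deduplicated), BlocksPut uploads the missing ones, and BlocksGet streams blocks back, optionally walking children recursively, with ServerError::EndOfStream closing a non-empty stream and NotFound signalling an empty one. A hedged client-side sketch of the exist-then-put round-trip (send_request is a hypothetical transport helper; the struct-literal field names are inferred from the accessors the responders above use, and Block::id() is assumed to compute the BlockId):

use std::collections::HashSet;
use ng_repo::errors::NgError;
use ng_repo::types::{Block, BlockId, OverlayId};

async fn put_missing_blocks(overlay: OverlayId, blocks: Vec<Block>) -> Result<(), NgError> {
    let ids: Vec<BlockId> = blocks.iter().map(|b| b.id()).collect();
    // 1) ask the broker which of these ids it already has
    let found: BlocksFound = send_request(BlocksExist::V0(BlocksExistV0 {
        overlay: Some(overlay),
        blocks: ids,
    }))
    .await?;
    let BlocksFound::V0(v0) = found;
    let missing: HashSet<BlockId> = v0.missing.into_iter().collect();
    // 2) upload only what the broker reported missing
    send_request(BlocksPut::V0(BlocksPutV0 {
        overlay: Some(overlay),
        blocks: blocks.into_iter().filter(|b| missing.contains(&b.id())).collect(),
    }))
    .await?;
    Ok(())
}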

@ -73,23 +73,31 @@ impl EActor for Actor<'_, PublishEvent, ()> {
msg: ProtocolMessage,
fsm: Arc<Mutex<NoiseFSM>>,
) -> Result<(), ProtocolError> {
let req = PublishEvent::try_from(msg)?;
#[cfg(not(target_arch = "wasm32"))]
{
let req = PublishEvent::try_from(msg)?;
// send a ProtocolError if invalid signatures (will disconnect the client)
req.event().verify()?;
// send a ProtocolError if invalid signatures (will disconnect the client)
req.event().verify()?;
let broker = BROKER.read().await;
let overlay = req.overlay().clone();
let res = broker.get_server_broker()?.dispatch_event(
&overlay,
req.take_event(),
&fsm.lock().await.user_id()?,
);
let broker = BROKER.read().await;
let overlay = req.overlay().clone();
let (user_id, remote_peer) = {
let fsm = fsm.lock().await;
(
fsm.user_id()?,
fsm.remote_peer().ok_or(ProtocolError::ActorError)?,
)
};
let res = broker
.dispatch_event(&overlay, req.take_event(), &user_id, &remote_peer)
.await;
fsm.lock()
.await
.send_in_reply_to(res.into(), self.id())
.await?;
fsm.lock()
.await
.send_in_reply_to(res.into(), self.id())
.await?;
}
Ok(())
}
}

@ -9,3 +9,9 @@ pub mod event;
pub mod commit_get;
pub mod topic_sync_req;
pub mod blocks_put;
pub mod blocks_exist;
pub mod blocks_get;

@ -106,7 +106,7 @@ impl EActor for Actor<'_, PinRepo, RepoOpened> {
) -> Result<(), ProtocolError> {
let req = PinRepo::try_from(msg)?;
let broker = BROKER.read().await;
let mut broker = BROKER.write().await;
// check the validity of the PublisherAdvert(s). this will return a ProtocolError (will close the connection)
let server_peer_id = broker.get_config().unwrap().peer_id;
@ -114,6 +114,14 @@ impl EActor for Actor<'_, PinRepo, RepoOpened> {
pub_ad.verify_for_broker(&server_peer_id)?;
}
let (user_id, remote_peer) = {
let fsm = fsm.lock().await;
(
fsm.user_id()?,
fsm.remote_peer().ok_or(ProtocolError::ActorError)?,
)
};
let result = {
match req.overlay_access() {
OverlayAccess::ReadOnly(r) => {
@ -124,11 +132,12 @@ impl EActor for Actor<'_, PinRepo, RepoOpened> {
{
Err(ServerError::InvalidRequest)
} else {
broker.get_server_broker()?.pin_repo_read(
broker.get_server_broker_mut()?.pin_repo_read(
req.overlay(),
req.hash(),
&fsm.lock().await.user_id()?,
&user_id,
req.ro_topics(),
&remote_peer,
)
}
}
@ -142,14 +151,15 @@ impl EActor for Actor<'_, PinRepo, RepoOpened> {
// TODO add a check on "|| overlay_root_topic.is_none()" because it should be mandatory to have one (not sent by client at the moment)
Err(ServerError::InvalidRequest)
} else {
broker.get_server_broker()?.pin_repo_write(
broker.get_server_broker_mut()?.pin_repo_write(
req.overlay_access(),
req.hash(),
&fsm.lock().await.user_id()?,
&user_id,
req.ro_topics(),
req.rw_topics(),
req.overlay_root_topic(),
req.expose_outer(),
&remote_peer,
)
}
}
@ -157,14 +167,15 @@ impl EActor for Actor<'_, PinRepo, RepoOpened> {
if !w.is_inner() || req.overlay() != w || req.expose_outer() {
Err(ServerError::InvalidRequest)
} else {
broker.get_server_broker()?.pin_repo_write(
broker.get_server_broker_mut()?.pin_repo_write(
req.overlay_access(),
req.hash(),
&fsm.lock().await.user_id()?,
&user_id,
req.ro_topics(),
req.rw_topics(),
req.overlay_root_topic(),
false,
&remote_peer,
)
}
}

@ -36,7 +36,7 @@ impl TopicSub {
)),
)
} else {
(repo.store.outer_overlay(), None)
(repo.store.inner_overlay(), None)
};
TopicSub::V0(TopicSubV0 {
@ -98,7 +98,7 @@ impl EActor for Actor<'_, TopicSub, TopicSubRes> {
) -> Result<(), ProtocolError> {
let req = TopicSub::try_from(msg)?;
let broker = BROKER.read().await;
let mut broker = BROKER.write().await;
// check the validity of the PublisherAdvert. this will return a ProtocolError (will close the connection)
if let Some(advert) = req.publisher() {
@ -106,12 +106,21 @@ impl EActor for Actor<'_, TopicSub, TopicSubRes> {
advert.verify_for_broker(&server_peer_id)?;
}
let res = broker.get_server_broker()?.topic_sub(
let (user_id, remote_peer) = {
let fsm = fsm.lock().await;
(
fsm.user_id()?,
fsm.remote_peer().ok_or(ProtocolError::ActorError)?,
)
};
let res = broker.get_server_broker_mut()?.topic_sub(
req.overlay(),
req.hash(),
req.topic(),
&fsm.lock().await.user_id()?,
&user_id,
req.publisher(),
&remote_peer,
);
fsm.lock()

@ -29,7 +29,7 @@ use ng_repo::object::Object;
use ng_repo::types::*;
use ng_repo::utils::generate_keypair;
use once_cell::sync::Lazy;
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
#[derive(Debug)]
@ -67,7 +67,7 @@ pub struct ServerConfig {
#[async_trait::async_trait]
pub trait ILocalBroker: Send + Sync + EActor {
async fn deliver(&mut self, event: Event);
async fn deliver(&mut self, event: Event, overlay: OverlayId, user: UserId);
}
pub static BROKER: Lazy<Arc<RwLock<Broker>>> = Lazy::new(|| Arc::new(RwLock::new(Broker::new())));
@ -87,29 +87,15 @@ pub struct Broker<'a> {
closing: bool,
server_broker: Option<Box<dyn IServerBroker + Send + Sync + 'a>>,
tauri_streams: HashMap<String, Sender<Commit>>,
disconnections_sender: Sender<String>,
disconnections_receiver: Option<Receiver<String>>,
//local_broker: Option<Box<dyn ILocalBroker + Send + Sync + 'a>>,
local_broker: Option<Arc<RwLock<dyn ILocalBroker + 'a>>>,
users_peers: HashMap<UserId, HashSet<X25519PubKey>>,
}
impl<'a> Broker<'a> {
/// helper function to store the sender of a tauri stream in order to be able to cancel it later on
/// only used in Tauri, not used in the JS SDK
pub fn tauri_stream_add(&mut self, stream_id: String, sender: Sender<Commit>) {
self.tauri_streams.insert(stream_id, sender);
}
/// helper function to cancel a tauri stream
/// only used in Tauri, not used in the JS SDK
pub fn tauri_stream_cancel(&mut self, stream_id: String) {
let s = self.tauri_streams.remove(&stream_id);
if let Some(sender) = s {
sender.close_channel();
}
}
// pub fn init_local_broker(
// &mut self,
// base_path: Option<PathBuf>,
@ -186,6 +172,16 @@ impl<'a> Broker<'a> {
.as_ref()
.ok_or(ProtocolError::BrokerError)
}
pub fn get_server_broker_mut(
&mut self,
) -> Result<&mut Box<dyn IServerBroker + Send + Sync + 'a>, ProtocolError> {
//log_debug!("GET STORAGE {:?}", self.server_storage);
self.server_broker
.as_mut()
.ok_or(ProtocolError::BrokerError)
}
//Option<Arc<RwLock<dyn ILocalBroker>>>,
pub fn get_local_broker(&self) -> Result<Arc<RwLock<dyn ILocalBroker + 'a>>, NgError> {
Ok(Arc::clone(
@ -301,14 +297,6 @@ impl<'a> Broker<'a> {
}
}
// pub fn add_user(&self, user: PubKey, is_admin: bool) -> Result<(), ProtocolError> {
// self.get_server_broker()?.add_user(user, is_admin)
// }
// pub fn list_users(&self, admins: bool) -> Result<Vec<PubKey>, ProtocolError> {
// self.get_server_broker()?.list_users(admins)
// }
pub async fn get_block_from_store_with_block_id(
&mut self,
nuri: String,
@ -357,54 +345,53 @@ impl<'a> Broker<'a> {
// .map_err(|_| ProtocolError::ObjectParseError)
}
pub async fn doc_sync_branch(&mut self, anuri: String) -> (Receiver<Commit>, Sender<Commit>) {
let (tx, rx) = mpsc::unbounded::<Commit>();
let obj_ref = ObjectRef {
id: ObjectId::Blake3Digest32([
228, 228, 181, 117, 36, 206, 41, 223, 130, 96, 85, 195, 104, 137, 78, 145, 42, 176,
58, 244, 111, 97, 246, 39, 11, 76, 135, 150, 188, 111, 66, 33,
]),
key: SymKey::ChaCha20Key([
100, 243, 39, 242, 203, 131, 102, 50, 9, 54, 248, 113, 4, 160, 28, 45, 73, 56, 217,
112, 95, 150, 144, 137, 9, 57, 106, 5, 39, 202, 146, 94,
]),
};
let refs = vec![obj_ref.clone()];
let metadata = vec![5u8; 55];
let (member_privkey, member_pubkey) = generate_keypair();
let overlay = OverlayId::nil();
let commit = Commit::new(
&member_privkey,
&member_pubkey,
overlay,
PubKey::nil(),
QuorumType::NoSigning,
vec![],
vec![],
vec![],
vec![],
refs,
vec![],
metadata,
obj_ref.clone(),
)
.unwrap();
async fn send(mut tx: Sender<Commit>, commit: Commit) -> ResultSend<()> {
while let Ok(_) = tx.send(commit.clone()).await {
log_debug!("sending");
sleep!(std::time::Duration::from_secs(3));
}
log_debug!("end of sending");
Ok(())
}
spawn_and_log_error(send(tx.clone(), commit));
// pub async fn doc_sync_branch(&mut self, anuri: String) -> (Receiver<Commit>, Sender<Commit>) {
// let obj_ref = ObjectRef {
// id: ObjectId::Blake3Digest32([
// 228, 228, 181, 117, 36, 206, 41, 223, 130, 96, 85, 195, 104, 137, 78, 145, 42, 176,
// 58, 244, 111, 97, 246, 39, 11, 76, 135, 150, 188, 111, 66, 33,
// ]),
// key: SymKey::ChaCha20Key([
// 100, 243, 39, 242, 203, 131, 102, 50, 9, 54, 248, 113, 4, 160, 28, 45, 73, 56, 217,
// 112, 95, 150, 144, 137, 9, 57, 106, 5, 39, 202, 146, 94,
// ]),
// };
// let refs = vec![obj_ref.clone()];
// let metadata = vec![5u8; 55];
// let (member_privkey, member_pubkey) = generate_keypair();
// let overlay = OverlayId::nil();
// let commit = Commit::new(
// &member_privkey,
// &member_pubkey,
// overlay,
// PubKey::nil(),
// QuorumType::NoSigning,
// vec![],
// vec![],
// vec![],
// vec![],
// refs,
// vec![],
// metadata,
// obj_ref.clone(),
// )
// .unwrap();
// let (tx, rx) = mpsc::unbounded::<Commit>();
// async fn send(mut tx: Sender<Commit>, commit: Commit) -> ResultSend<()> {
// while let Ok(_) = tx.send(commit.clone()).await {
// log_debug!("sending");
// sleep!(std::time::Duration::from_secs(3));
// }
// log_debug!("end of sending");
// Ok(())
// }
// spawn_and_log_error(send(tx.clone(), commit));
(rx, tx.clone())
}
// (rx, tx.clone())
// }
pub fn reconnecting(&mut self, peer_id: X25519PrivKey, user: Option<PubKey>) {
let peerinfo = self.peers.get_mut(&(user, peer_id));
@ -422,12 +409,22 @@ impl<'a> Broker<'a> {
None => {}
}
}
fn remove_peer_id(&mut self, peer_id: X25519PrivKey, user: Option<PubKey>) {
async fn remove_peer_id(&mut self, peer_id: X25519PrivKey, user: Option<PubKey>) {
let removed = self.peers.remove(&(user, peer_id));
match removed {
Some(info) => match info.connected {
PeerConnection::NONE => {}
PeerConnection::Client(cb) => {}
PeerConnection::Client(cb) => {
#[cfg(not(target_arch = "wasm32"))]
if user.is_none() {
// server side
if let Some(fsm) = cb.fsm {
if let Ok(user) = fsm.lock().await.user_id() {
let _ = self.remove_user_peer(&user, &peer_id);
}
}
}
}
PeerConnection::Core(ip) => {
self.direct_connections.remove(&ip);
}
@ -480,12 +477,12 @@ impl<'a> Broker<'a> {
shutdown_sender,
direct_connections: HashMap::new(),
peers: HashMap::new(),
tauri_streams: HashMap::new(),
closing: false,
server_broker: None,
disconnections_sender,
disconnections_receiver: Some(disconnections_receiver),
local_broker: None,
users_peers: HashMap::new(),
}
}
@ -625,7 +622,11 @@ impl<'a> Broker<'a> {
Some(Either::Right(remote_peer_id)) => {
let res = join.next().await;
log_debug!("SOCKET IS CLOSED {:?} peer_id: {:?}", res, remote_peer_id);
BROKER.write().await.remove_peer_id(remote_peer_id, None);
BROKER
.write()
.await
.remove_peer_id(remote_peer_id, None)
.await;
}
_ => {
log_debug!(
@ -649,6 +650,36 @@ impl<'a> Broker<'a> {
Ok(())
}
#[cfg(not(target_arch = "wasm32"))]
fn add_user_peer(&mut self, user: UserId, peer: X25519PrivKey) -> Result<(), ProtocolError> {
let peers_set = self
.users_peers
.entry(user)
.or_insert(HashSet::with_capacity(1));
if !peers_set.insert(peer) {
return Err(ProtocolError::PeerAlreadyConnected);
}
Ok(())
}
#[cfg(not(target_arch = "wasm32"))]
fn remove_user_peer(
&mut self,
user: &UserId,
peer: &X25519PrivKey,
) -> Result<(), ProtocolError> {
let peers_set = self
.users_peers
.get_mut(user)
.ok_or(ProtocolError::UserNotConnected)?;
if !peers_set.remove(peer) {
return Err(ProtocolError::PeerNotConnected);
}
Ok(())
}
#[cfg(not(target_arch = "wasm32"))]
pub async fn attach_and_authorize_peer_id(
&mut self,
@ -709,7 +740,10 @@ impl<'a> Broker<'a> {
connection.reset_shutdown(remote_peer_id).await;
let connected = if !is_core {
fsm.set_user_id(client.unwrap().user);
let user = client.unwrap().user;
fsm.set_user_id(user);
self.add_user_peer(user, remote_peer_id)?;
PeerConnection::Client(connection)
} else {
let dc = DirectConnection {
@ -890,7 +924,8 @@ impl<'a> Broker<'a> {
BROKER
.write()
.await
.remove_peer_id(remote_peer_id, config.get_user());
.remove_peer_id(remote_peer_id, config.get_user())
.await;
}
}
.await;
@ -928,6 +963,50 @@ impl<'a> Broker<'a> {
}
}
#[cfg(not(target_arch = "wasm32"))]
pub async fn dispatch_event(
&self,
overlay: &OverlayId,
event: Event,
user_id: &UserId,
remote_peer: &PubKey,
) -> Result<(), ServerError> {
// TODO: deal with subscriptions on the outer overlay. For now we assume everything is on the inner overlay
let peers_for_local_dispatch = self.get_server_broker()?.dispatch_event(
overlay,
event.clone(),
user_id,
remote_peer,
)?;
log_debug!("dispatch_event {:?}", peers_for_local_dispatch);
for peer in peers_for_local_dispatch {
log_debug!("dispatch_event peer {:?}", peer);
if let Some(BrokerPeerInfo {
connected: PeerConnection::Client(ConnectionBase { fsm: Some(fsm), .. }),
..
}) = self.peers.get(&(None, peer.to_owned().to_dh()))
{
log_debug!("ForwardedEvent peer {:?}", peer);
let _ = fsm
.lock()
.await
.send(ProtocolMessage::ClientMessage(ClientMessage::V0(
ClientMessageV0 {
overlay: *overlay,
padding: vec![],
content: ClientMessageContentV0::ForwardedEvent(event.clone()),
},
)))
.await;
}
}
Ok(())
}
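A sketch of a possible call site (an assumption, not part of this commit): after a client's PublishEvent is accepted, the broker forwards the event to the other locally connected subscribers.
// Hypothetical call site: `overlay`, `event`, `user_id` and `remote_peer`
// would come from the incoming client request.
BROKER
    .read()
    .await
    .dispatch_event(&overlay, event, &user_id, &remote_peer)
    .await?;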
pub fn take_disconnections_receiver(&mut self) -> Option<Receiver<String>> {
self.disconnections_receiver.take()
}

@ -264,6 +264,10 @@ impl NoiseFSM {
}
}
pub fn remote_peer(&self) -> &Option<PubKey> {
&self.remote
}
pub(crate) fn set_user_id(&mut self, user: UserId) {
if self.user.is_none() {
self.user = Some(user);
@ -309,7 +313,12 @@ impl NoiseFSM {
if in_reply_to != 0 {
msg.set_id(in_reply_to);
}
log_debug!("SENDING: {:?}", msg);
#[cfg(debug_assertions)]
if msg.is_block() {
log_debug!("SENDING BLOCK");
} else {
log_debug!("SENDING: {:?}", msg);
}
if self.noise_cipher_state_enc.is_some() {
let cipher = self.encrypt(msg)?;
self.sender
@ -408,11 +417,16 @@ impl NoiseFSM {
}
}
if msg_opt.is_some() {
log_debug!(
"RECEIVED: {:?} in state {:?}",
msg_opt.as_ref().unwrap(),
self.state
);
#[cfg(debug_assertions)]
if msg_opt.as_ref().unwrap().is_block() {
log_debug!("RECEIVED BLOCK");
} else {
log_debug!(
"RECEIVED: {:?} in state {:?}",
msg_opt.as_ref().unwrap(),
self.state
);
}
}
match self.state {
FSMstate::Closing => {}
@ -538,7 +552,7 @@ impl NoiseFSM {
// CLIENT side receiving probe response
if let Some(msg) = msg_opt {
let id = msg.id();
if id != 0 {
if id != Some(0) {
return Err(ProtocolError::InvalidState);
}
if let ProtocolMessage::ProbeResponse(_probe_res) = &msg {
@ -736,7 +750,7 @@ impl NoiseFSM {
let content = ClientAuthContentV0 {
user: user_pub,
client: client_pub,
/// Nonce from ServerHello
// Nonce from ServerHello
nonce: hello.nonce().clone(),
info: info.clone(),
registration: client_config.registration,
@ -747,7 +761,7 @@ impl NoiseFSM {
sign(&client_config.client_priv, &client_pub, &ser)?;
let client_auth = ClientAuth::V0(ClientAuthV0 {
content,
/// Signature by user key
// Signature by user key
sig,
client_sig,
});
@ -845,11 +859,29 @@ impl NoiseFSM {
if msg.type_id() != TypeId::of::<ClientMessage>() {
return Err(ProtocolError::AccessDenied);
}
let id: i64 = msg.id();
if self.dir.is_server() && id > 0 || !self.dir.is_server() && id < 0 {
return Ok(StepReply::Responder(msg));
} else if id != 0 {
return Ok(StepReply::Response(msg));
match msg.id() {
Some(id) => {
if self.dir.is_server() && id > 0 || !self.dir.is_server() && id < 0 {
return Ok(StepReply::Responder(msg));
} else if id != 0 {
return Ok(StepReply::Response(msg));
}
}
None => {
if let ProtocolMessage::ClientMessage(cm) = msg {
if let Some((event, overlay)) = cm.forwarded_event() {
BROKER
.read()
.await
.get_local_broker()?
.write()
.await
.deliver(event, overlay, self.user_id()?)
.await;
return Ok(StepReply::NONE);
}
}
}
}
}
}
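The request-id convention encoded by this match (an inference from the code, not a documented API): the client allocates positive ids for its requests and the server negative ones, so an id of the opposite sign is a request to answer (Responder), while a non-zero id of one's own sign is a response to a pending request (Response).
// Illustrative helper (not in the codebase): classify an incoming id.
fn i_must_answer(i_am_server: bool, id: i64) -> bool {
    (i_am_server && id > 0) || (!i_am_server && id < 0)
}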
@ -1016,7 +1048,7 @@ impl ConnectionBase {
}
Ok(StepReply::Response(response)) => {
let mut lock = actors.lock().await;
let exists = lock.get_mut(&response.id());
let exists = lock.get_mut(&response.id().unwrap_or(0));
match exists {
Some(actor_sender) => {
if actor_sender
@ -1077,6 +1109,8 @@ impl ConnectionBase {
res
}
// FIXME: why not use the FSM instead? It looks like this is sending messages to the wire, unencrypted.
// Only final errors are sent this way, but it looks like even those errors should be encrypted.
pub async fn send(&mut self, cmd: ConnectionCommand) {
let _ = self.sender_tx.as_mut().unwrap().send(cmd).await;
}

@ -11,11 +11,16 @@
//! Trait for ServerBroker
use std::collections::HashSet;
use crate::types::*;
use ng_repo::errors::*;
use ng_repo::types::*;
pub trait IServerBroker: Send + Sync {
fn put_block(&self, overlay_id: &OverlayId, block: Block) -> Result<(), ServerError>;
fn has_block(&self, overlay_id: &OverlayId, block_id: &BlockId) -> Result<(), ServerError>;
fn get_block(&self, overlay_id: &OverlayId, block_id: &BlockId) -> Result<Block, ServerError>;
fn get_user(&self, user_id: PubKey) -> Result<bool, ProtocolError>;
fn add_user(&self, user_id: PubKey, is_admin: bool) -> Result<(), ProtocolError>;
fn del_user(&self, user_id: PubKey) -> Result<(), ProtocolError>;
@ -45,7 +50,7 @@ pub trait IServerBroker: Send + Sync {
) -> Result<RepoPinStatus, ServerError>;
fn pin_repo_write(
&self,
&mut self,
overlay: &OverlayAccess,
repo: &RepoHash,
user_id: &UserId,
@ -53,23 +58,26 @@ pub trait IServerBroker: Send + Sync {
rw_topics: &Vec<PublisherAdvert>,
overlay_root_topic: &Option<TopicId>,
expose_outer: bool,
peer: &PubKey,
) -> Result<RepoOpened, ServerError>;
fn pin_repo_read(
&self,
&mut self,
overlay: &OverlayId,
repo: &RepoHash,
user_id: &UserId,
ro_topics: &Vec<TopicId>,
peer: &PubKey,
) -> Result<RepoOpened, ServerError>;
fn topic_sub(
&self,
&mut self,
overlay: &OverlayId,
repo: &RepoHash,
topic: &TopicId,
user_id: &UserId,
publisher: Option<&PublisherAdvert>,
peer: &PubKey,
) -> Result<TopicSubRes, ServerError>;
fn get_commit(&self, overlay: &OverlayId, id: &ObjectId) -> Result<Vec<Block>, ServerError>;
@ -79,7 +87,8 @@ pub trait IServerBroker: Send + Sync {
overlay: &OverlayId,
event: Event,
user_id: &UserId,
) -> Result<(), ServerError>;
remote_peer: &PubKey,
) -> Result<HashSet<&PubKey>, ServerError>;
fn topic_sync_req(
&self,

@ -1435,8 +1435,9 @@ pub enum ClientType {
NativeService,
NodeService,
Verifier,
Box,
Stick,
VerifierLocal,
Box, // VerifierBox
Stick, // VerifierStick
WalletMaster,
ClientBroker,
Cli,
@ -2047,7 +2048,7 @@ pub struct OverlayAdvertMarkerV0 {
/// Core Block Get V0
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CoreBlockGetV0 {
pub struct CoreBlocksGetV0 {
/// Block ID to request
pub ids: Vec<BlockId>,
@ -2093,8 +2094,8 @@ pub enum ReturnPathTimingAdvert {
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum CoreBlockGet {
V0(CoreBlockGetV0),
pub enum CoreBlocksGet {
V0(CoreBlocksGetV0),
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -2107,7 +2108,7 @@ pub enum CoreBlockResult {
pub enum CoreDirectMessageContentV0 {
OverlayAdvertMarker(OverlayAdvertMarker),
ReturnPathTimingAdvert(ReturnPathTimingAdvert),
BlockGet(CoreBlockGet),
BlocksGet(CoreBlocksGet),
BlockResult(CoreBlockResult),
//PostInbox,
//PartialSignature,
@ -2199,7 +2200,7 @@ pub enum OuterOverlayRequestContentV0 {
OverlayLeave(OverlayLeave),
TopicSub(PubKey),
TopicUnsub(PubKey),
BlockGet(BlockGet),
BlocksGet(BlocksGet),
//PostInboxRequest(PostInboxRequest),
}
@ -2958,11 +2959,6 @@ pub enum TopicSub {
}
impl TopicSub {
pub fn overlay(&self) -> &OverlayId {
match self {
Self::V0(v0) => v0.overlay.as_ref().unwrap(),
}
}
pub fn hash(&self) -> &RepoHash {
match self {
Self::V0(o) => &o.repo_hash,
@ -2983,6 +2979,11 @@ impl TopicSub {
Self::V0(v0) => v0.overlay = Some(overlay),
}
}
pub fn overlay(&self) -> &OverlayId {
match self {
Self::V0(v0) => v0.overlay.as_ref().unwrap(),
}
}
}
/// Request unsubscription from a `Topic` of an already opened or pinned Repo
@ -3005,7 +3006,7 @@ pub enum TopicUnsub {
///
/// commit_header_key is always set to None in the reply when the request is made on the OuterOverlay of protected or Group overlays
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BlockGetV0 {
pub struct BlocksGetV0 {
/// Block IDs to request
pub ids: Vec<BlockId>,
@ -3015,28 +3016,31 @@ pub struct BlockGetV0 {
/// Topic the object is referenced from, if it is known by the requester.
/// can be used to do a BlockSearchTopic in the core overlay.
pub topic: Option<TopicId>,
#[serde(skip)]
pub overlay: Option<OverlayId>,
}
/// Request an object by ID
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum BlockGet {
V0(BlockGetV0),
pub enum BlocksGet {
V0(BlocksGetV0),
}
impl BlockGet {
impl BlocksGet {
pub fn ids(&self) -> &Vec<BlockId> {
match self {
BlockGet::V0(o) => &o.ids,
BlocksGet::V0(o) => &o.ids,
}
}
pub fn include_children(&self) -> bool {
match self {
BlockGet::V0(o) => o.include_children,
BlocksGet::V0(o) => o.include_children,
}
}
pub fn topic(&self) -> Option<PubKey> {
match self {
BlockGet::V0(o) => o.topic,
BlocksGet::V0(o) => o.topic,
}
}
}
@ -3044,10 +3048,10 @@ impl BlockGet {
/// Request a Commit by ID
///
/// commit_header_key is always set to None in the reply when the request is made on the OuterOverlay of protected or Group overlays
/// The difference with BlockGet is that the Broker will try to return all the commit blocks as they were sent in the Pub/Sub Event, if it has it.
/// This will help in having all the blocks (including the header and body blocks), while a BlockGet would inevitably return only the blocks of the ObjectContent,
/// The difference with BlocksGet is that the Broker will try to return all the commit blocks as they were sent in the Pub/Sub Event, if it has them.
/// This will help in having all the blocks (including the header and body blocks), while a BlocksGet would inevitably return only the blocks of the ObjectContent,
/// and neither the header nor the body; load() would then fail with CommitLoadError::MissingBlocks. That's what happens when the Commit is not present in the pubsub,
/// and we need to default to using BlockGet instead.
/// and we need to default to using BlocksGet instead.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CommitGetV0 {
/// Block IDs to request
@ -3079,6 +3083,9 @@ impl CommitGet {
pub struct BlocksPutV0 {
/// Blocks to store
pub blocks: Vec<Block>,
#[serde(skip)]
pub overlay: Option<OverlayId>,
}
/// Request to store one or more blocks
@ -3093,15 +3100,28 @@ impl BlocksPut {
BlocksPut::V0(o) => &o.blocks,
}
}
pub fn overlay(&self) -> &OverlayId {
match self {
Self::V0(v0) => v0.overlay.as_ref().unwrap(),
}
}
pub fn set_overlay(&mut self, overlay: OverlayId) {
match self {
Self::V0(v0) => v0.overlay = Some(overlay),
}
}
}
/// Request to know if some blocks are present locally
///
/// used by client before publishing an event, to know what to push
/// used by client before publishing an event with files, to know what to push
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct BlocksExistV0 {
/// Ids of Blocks to check
pub blocks: Vec<BlockId>,
#[serde(skip)]
pub overlay: Option<OverlayId>,
}
/// Request to know if some blocks are present locally
@ -3116,6 +3136,16 @@ impl BlocksExist {
BlocksExist::V0(o) => &o.blocks,
}
}
pub fn overlay(&self) -> &OverlayId {
match self {
Self::V0(v0) => v0.overlay.as_ref().unwrap(),
}
}
pub fn set_overlay(&mut self, overlay: OverlayId) {
match self {
Self::V0(v0) => v0.overlay = Some(overlay),
}
}
}
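The #[serde(skip)] overlay field above never travels on the wire; the broker re-injects it from the ClientMessage envelope before the request is processed. A minimal sketch of that wiring, assuming `content` is the ClientRequestContentV0 extracted from an incoming message:
// Sketch (assumed wiring): the overlay comes from the ClientMessageV0
// envelope, since the field itself is #[serde(skip)] and arrives as None.
let overlay = client_message.overlay_id();
content.set_overlay(overlay);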
/// Request to pin an object
@ -3201,7 +3231,7 @@ pub enum ClientRequestContentV0 {
TopicUnsub(TopicUnsub),
BlocksExist(BlocksExist),
BlockGet(BlockGet),
BlocksGet(BlocksGet),
CommitGet(CommitGet),
TopicSyncReq(TopicSyncReq),
@ -3224,6 +3254,9 @@ impl ClientRequestContentV0 {
ClientRequestContentV0::PublishEvent(a) => a.set_overlay(overlay),
ClientRequestContentV0::CommitGet(a) => a.set_overlay(overlay),
ClientRequestContentV0::TopicSyncReq(a) => a.set_overlay(overlay),
ClientRequestContentV0::BlocksPut(a) => a.set_overlay(overlay),
ClientRequestContentV0::BlocksExist(a) => a.set_overlay(overlay),
ClientRequestContentV0::BlocksGet(a) => a.set_overlay(overlay),
_ => unimplemented!(),
}
}
@ -3272,6 +3305,9 @@ impl ClientRequest {
ClientRequestContentV0::PublishEvent(r) => r.get_actor(self.id()),
ClientRequestContentV0::CommitGet(r) => r.get_actor(self.id()),
ClientRequestContentV0::TopicSyncReq(r) => r.get_actor(self.id()),
ClientRequestContentV0::BlocksPut(r) => r.get_actor(self.id()),
ClientRequestContentV0::BlocksExist(r) => r.get_actor(self.id()),
ClientRequestContentV0::BlocksGet(r) => r.get_actor(self.id()),
_ => unimplemented!(),
},
}
@ -3364,6 +3400,11 @@ impl TopicSubRes {
publisher,
})
}
pub fn known_heads(&self) -> &Vec<ObjectId> {
match self {
Self::V0(v0) => &v0.known_heads,
}
}
}
impl From<TopicId> for TopicSubRes {
@ -3518,6 +3559,22 @@ pub enum ClientMessageContentV0 {
ForwardedEvent(Event),
ForwardedBlock(Block),
}
impl ClientMessageContentV0 {
pub fn is_block(&self) -> bool {
match self {
Self::ClientRequest(ClientRequest::V0(ClientRequestV0 {
content: ClientRequestContentV0::BlocksPut(_),
..
})) => true,
Self::ClientResponse(ClientResponse::V0(ClientResponseV0 {
content: ClientResponseContentV0::Block(_),
..
})) => true,
_ => false,
}
}
}
/// Broker message for an overlay
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ClientMessageV0 {
@ -3548,6 +3605,16 @@ impl ClientMessage {
},
}
}
pub fn forwarded_event(self) -> Option<(Event, OverlayId)> {
let overlay = self.overlay_id();
match self {
ClientMessage::V0(o) => match o.content {
ClientMessageContentV0::ForwardedEvent(e) => Some((e, overlay)),
_ => None,
},
}
}
pub fn overlay_id(&self) -> OverlayId {
match self {
ClientMessage::V0(o) => o.overlay,
@ -3567,15 +3634,13 @@ impl ClientMessage {
}
}
}
pub fn id(&self) -> i64 {
pub fn id(&self) -> Option<i64> {
match self {
ClientMessage::V0(o) => match &o.content {
ClientMessageContentV0::ClientResponse(r) => r.id(),
ClientMessageContentV0::ClientRequest(r) => r.id(),
ClientMessageContentV0::ClientResponse(r) => Some(r.id()),
ClientMessageContentV0::ClientRequest(r) => Some(r.id()),
ClientMessageContentV0::ForwardedEvent(_)
| ClientMessageContentV0::ForwardedBlock(_) => {
panic!("it is an event")
}
| ClientMessageContentV0::ForwardedBlock(_) => None,
},
}
}
@ -3869,12 +3934,12 @@ impl TryFrom<&ProtocolMessage> for ServerError {
}
impl ProtocolMessage {
pub fn id(&self) -> i64 {
pub fn id(&self) -> Option<i64> {
match self {
ProtocolMessage::ExtRequest(ext_req) => ext_req.id(),
ProtocolMessage::ExtResponse(ext_res) => ext_res.id(),
ProtocolMessage::ExtRequest(ext_req) => Some(ext_req.id()),
ProtocolMessage::ExtResponse(ext_res) => Some(ext_res.id()),
ProtocolMessage::ClientMessage(client_msg) => client_msg.id(),
_ => 0,
_ => None,
}
}
pub fn set_id(&mut self, id: i64) {
@ -3940,6 +4005,16 @@ impl ProtocolMessage {
padding: vec![],
}))
}
pub fn is_block(&self) -> bool {
match self {
ProtocolMessage::ClientMessage(ClientMessage::V0(ClientMessageV0 {
content: c,
..
})) => c.is_block(),
_ => false,
}
}
}
impl From<ClientResponseContentV0> for ClientResponse {

@ -38,6 +38,8 @@ pub trait BlockStorage: Send + Sync {
/// number of Blocks in the storage
fn len(&self) -> Result<usize, StorageError>;
fn has(&self, overlay: &OverlayId, id: &BlockId) -> Result<(), StorageError>;
}
/* LMDB values:
@ -138,6 +140,13 @@ impl BlockStorage for HashMapBlockStorage {
}
}
fn has(&self, _overlay: &OverlayId, id: &BlockId) -> Result<(), StorageError> {
if !self.blocks.read().unwrap().contains_key(id) {
return Err(StorageError::NotFound);
}
Ok(())
}
fn len(&self) -> Result<usize, StorageError> {
Ok(self.get_len())
}

@ -532,6 +532,22 @@ impl Commit {
res
}
/// Get files
pub fn files(&self) -> Vec<ObjectRef> {
let mut res: Vec<ObjectRef> = vec![];
match self {
Commit::V0(c) => match &c.content.header_keys() {
Some(CommitHeaderKeys::V0(hk_v0)) => {
for file in hk_v0.files.iter() {
res.push(file.clone());
}
}
None => {}
},
};
res
}
/// Get deps (that have both an ID in the header and a key in the header_keys)
pub fn deps(&self) -> Vec<ObjectRef> {
let mut res: Vec<ObjectRef> = vec![];
@ -1394,7 +1410,7 @@ impl fmt::Display for CommitBody {
// CommitBodyV0::Snapshot(b) => write!(f, "Snapshot {}", b), // a soft snapshot
// CommitBodyV0::AsyncTransaction(b) => write!(f, "AsyncTransaction {}", b), // partial_order
// CommitBodyV0::SyncTransaction(b) => write!(f, "SyncTransaction {}", b), // total_order
// CommitBodyV0::AddFile(b) => write!(f, "AddFile {}", b),
CommitBodyV0::AddFile(b) => write!(f, "AddFile {}", b),
// CommitBodyV0::RemoveFile(b) => write!(f, "RemoveFile {}", b),
// CommitBodyV0::Compact(b) => write!(f, "Compact {}", b), // a hard snapshot. total order enforced with total_order_quorum
//Merge(Merge) => write!(f, "RootBranch {}", b),

@ -10,6 +10,7 @@
//! Errors
pub use crate::commit::{CommitLoadError, CommitVerifyError};
use crate::file::FileError;
use crate::object::Object;
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
@ -70,6 +71,10 @@ pub enum NgError {
BrokerConfigErrorStr(&'static str),
BrokerConfigError(String),
MalformedEvent,
InvalidPayload,
WrongUploadId,
FileError(FileError),
InternalError,
}
impl Error for NgError {}
@ -129,6 +134,12 @@ impl From<CommitLoadError> for NgError {
}
}
impl From<FileError> for NgError {
fn from(e: FileError) -> Self {
NgError::FileError(e)
}
}
impl From<CommitVerifyError> for NgError {
fn from(e: CommitVerifyError) -> Self {
NgError::CommitVerifyError(e)
@ -232,6 +243,10 @@ pub enum ServerError {
AccessDenied,
InvalidHeader,
MalformedBranch,
BrokerError,
ProtocolError,
PeerAlreadySubscribed,
SubscriptionNotFound,
}
impl From<StorageError> for ServerError {
@ -243,6 +258,16 @@ impl From<StorageError> for ServerError {
}
}
impl From<ProtocolError> for ServerError {
fn from(e: ProtocolError) -> Self {
match e {
ProtocolError::NotFound => ServerError::NotFound,
ProtocolError::BrokerError => ServerError::BrokerError,
_ => ServerError::ProtocolError,
}
}
}
impl From<NgError> for ServerError {
fn from(e: NgError) -> Self {
match e {
@ -281,10 +306,14 @@ pub enum VerifierError {
TopicNotFound,
RepoNotFound,
StoreNotFound,
OverlayNotFound,
BranchNotFound,
InvalidBranch,
NoBlockStorageAvailable,
RootBranchNotFound,
BranchNotOpened,
DoubleBranchSubscription,
InvalidCommit,
}
impl From<NgError> for VerifierError {
@ -371,6 +400,8 @@ pub enum ProtocolError {
Expired,
PeerAlreadyConnected,
UserNotConnected,
PeerNotConnected,
OtherError,
NetError,
StorageError,

@ -12,6 +12,7 @@
use core::fmt;
use std::cmp::min;
use std::collections::HashMap;
use std::sync::Arc;
use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::ChaCha20;
@ -25,7 +26,7 @@ use crate::store::Store;
use crate::types::*;
/// File errors
#[derive(Debug, PartialEq)]
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum FileError {
/// Missing blocks
MissingBlocks(Vec<BlockId>),
@ -71,17 +72,20 @@ impl From<ObjectParseError> for FileError {
}
}
trait ReadFile {
pub trait ReadFile {
fn read(&self, pos: usize, size: usize) -> Result<Vec<u8>, FileError>;
fn get_all_blocks_ids(&self) -> Result<Vec<ObjectId>, FileError>;
}
/// A File in memory (read access only)
pub struct File<'a> {
internal: Box<dyn ReadFile + 'a>,
blocks_ids: Vec<BlockId>,
}
impl<'a> File<'a> {
pub fn open(id: ObjectId, key: SymKey, store: &'a Store) -> Result<File<'a>, FileError> {
pub fn open(id: ObjectId, key: SymKey, store: Arc<Store>) -> Result<File<'a>, FileError> {
let root_block = store.get(&id)?;
if root_block.children().len() == 2
@ -89,12 +93,14 @@ impl<'a> File<'a> {
{
Ok(File {
internal: Box::new(RandomAccessFile::open(id, key, store)?),
blocks_ids: vec![],
})
} else {
let obj = Object::load(id, Some(key), store)?;
let obj = Object::load(id, Some(key), &store)?;
match obj.content_v0()? {
ObjectContentV0::SmallFile(small_file) => Ok(File {
internal: Box::new(small_file),
blocks_ids: obj.block_ids(),
}),
_ => Err(FileError::NotAFile),
}
@ -106,6 +112,13 @@ impl<'a> ReadFile for File<'a> {
fn read(&self, pos: usize, size: usize) -> Result<Vec<u8>, FileError> {
self.internal.read(pos, size)
}
fn get_all_blocks_ids(&self) -> Result<Vec<ObjectId>, FileError> {
if self.blocks_ids.len() > 0 {
Ok(self.blocks_ids.to_vec())
} else {
self.internal.get_all_blocks_ids()
}
}
}
impl ReadFile for SmallFile {
@ -114,6 +127,9 @@ impl ReadFile for SmallFile {
Self::V0(v0) => v0.read(pos, size),
}
}
fn get_all_blocks_ids(&self) -> Result<Vec<ObjectId>, FileError> {
unimplemented!();
}
}
impl ReadFile for SmallFileV0 {
@ -126,12 +142,15 @@ impl ReadFile for SmallFileV0 {
}
Ok(self.content[pos..pos + size].to_vec())
}
fn get_all_blocks_ids(&self) -> Result<Vec<ObjectId>, FileError> {
unimplemented!();
}
}
/// A RandomAccessFile in memory. This is not used to serialize data
pub struct RandomAccessFile<'a> {
pub struct RandomAccessFile {
//storage: Arc<&'a dyn BlockStorage>,
store: &'a Store,
store: Arc<Store>,
/// accurate once saved or opened
meta: RandomAccessFileMeta,
@ -155,18 +174,61 @@ pub struct RandomAccessFile<'a> {
size: usize,
}
impl<'a> ReadFile for RandomAccessFile<'a> {
impl ReadFile for RandomAccessFile {
fn get_all_blocks_ids(&self) -> Result<Vec<ObjectId>, FileError> {
if self.id.is_none() {
unimplemented!();
}
let mut res = Vec::with_capacity(4);
let _: Vec<()> = self
.blocks
.iter()
.map(|(id, _)| res.push(id.clone()))
.collect();
recurse_tree(
&self.store,
self.content_block.as_ref().unwrap().clone(),
&mut res,
self.meta.depth(),
)?;
fn recurse_tree(
store: &Store,
current_block_id_key: (Digest, SymKey),
res: &mut Vec<Digest>,
level: u8,
) -> Result<(), FileError> {
res.push(current_block_id_key.0);
if level > 0 {
let tree_block = store.get(&current_block_id_key.0)?;
let (children, content) = tree_block.read(&current_block_id_key.1)?;
if children.len() == 0 || content.len() > 0 {
return Err(FileError::BlockDeserializeError);
}
for child in children {
recurse_tree(store, child, res, level - 1)?;
}
}
Ok(())
}
Ok(res)
}
/// Reads at most one block from the file. The returned vector should be checked for its size: it might be smaller than what you asked for.
/// `pos` can be anywhere in the file.
//TODO: parallelize decryption on multiple threads (cores)
fn read(&self, pos: usize, size: usize) -> Result<Vec<u8>, FileError> {
fn read(&self, pos: usize, mut size: usize) -> Result<Vec<u8>, FileError> {
if size == 0 {
return Err(FileError::InvalidArgument);
}
if self.id.is_some() {
if pos + size > self.meta.total_size() as usize {
let total = self.meta.total_size() as usize;
if pos > total {
return Err(FileError::EndOfFile);
}
size = min(total - pos, size);
let mut current_block_id_key = self.content_block.as_ref().unwrap().clone();
let depth = self.meta.depth();
@ -242,7 +304,7 @@ impl<'a> ReadFile for RandomAccessFile<'a> {
}
}
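Because read() returns at most one block per call, callers drain a file by looping and advancing by the returned length, exactly as the sending_loop in ng-verifier's request_processor does. A usage sketch, assuming `file` is an opened RandomAccessFile:
let mut pos = 0;
loop {
    match file.read(pos, 1_048_564) {
        Ok(chunk) => {
            if chunk.is_empty() {
                break; // defensive: nothing left to read
            }
            pos += chunk.len();
            // ... consume chunk
        }
        Err(_) => break, // FileError::EndOfFile, or a real error
    }
}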
impl<'a> RandomAccessFile<'a> {
impl RandomAccessFile {
pub fn meta(&self) -> &RandomAccessFileMeta {
&self.meta
}
@ -396,8 +458,8 @@ impl<'a> RandomAccessFile<'a> {
block_size: usize,
content_type: String,
metadata: Vec<u8>,
store: &'a Store,
) -> Result<RandomAccessFile<'a>, FileError> {
store: Arc<Store>,
) -> Result<RandomAccessFile, FileError> {
//let max_block_size = store_max_value_size();
let valid_block_size = store_valid_value_size(block_size) - BLOCK_EXTRA;
@ -405,22 +467,22 @@ impl<'a> RandomAccessFile<'a> {
let total_size = content.len() as u64;
let mut conv_key = Object::convergence_key(store);
let mut conv_key = Object::convergence_key(&store);
let mut blocks: Vec<(BlockId, BlockKey)> = vec![];
let mut already_existing: HashMap<BlockKey, BlockId> = HashMap::new();
//log_debug!("making the leaves");
for chunck in content.chunks(valid_block_size) {
let data_chunk = ChunkContentV0::DataChunk(chunck.to_vec());
for chunk in content.chunks(valid_block_size) {
let data_chunk = ChunkContentV0::DataChunk(chunk.to_vec());
let content_ser = serde_bare::to_vec(&data_chunk).unwrap();
blocks.push(Self::make_block(
content_ser,
&conv_key,
vec![],
&mut already_existing,
store,
&store,
)?);
}
assert_eq!(
@ -438,7 +500,7 @@ impl<'a> RandomAccessFile<'a> {
});
let (content_block, root_block) =
Self::save_(&mut already_existing, &blocks, &mut meta, &conv_key, store)?;
Self::save_(&mut already_existing, &blocks, &mut meta, &conv_key, &store)?;
conv_key.zeroize();
@ -460,7 +522,7 @@ impl<'a> RandomAccessFile<'a> {
block_size: usize,
content_type: String,
metadata: Vec<u8>,
store: &'a Store,
store: Arc<Store>,
) -> Self {
let valid_block_size = store_valid_value_size(block_size) - BLOCK_EXTRA;
@ -476,14 +538,14 @@ impl<'a> RandomAccessFile<'a> {
});
Self {
store,
store: Arc::clone(&store),
meta,
block_contents: HashMap::new(),
blocks: vec![],
id: None,
key: None,
content_block: None,
conv_key: Some(Object::convergence_key(store)),
conv_key: Some(Object::convergence_key(&store)),
remainder: vec![],
size: 0,
}
@ -518,7 +580,7 @@ impl<'a> RandomAccessFile<'a> {
&conv_key,
vec![],
&mut already_existing,
self.store,
&self.store,
)?);
} else {
// not enough data to create a new block
@ -530,28 +592,28 @@ impl<'a> RandomAccessFile<'a> {
return Ok(());
}
for chunck in data[pos..].chunks(chunk_size) {
if chunck.len() == chunk_size {
for chunk in data[pos..].chunks(chunk_size) {
if chunk.len() == chunk_size {
self.size += chunk_size;
//log_debug!("size += chunk_size {} {}", self.size, chunk_size);
let data_chunk = ChunkContentV0::DataChunk(chunck.to_vec());
let data_chunk = ChunkContentV0::DataChunk(chunk.to_vec());
let content_ser = serde_bare::to_vec(&data_chunk).unwrap();
self.blocks.push(Self::make_block(
content_ser,
&conv_key,
vec![],
&mut already_existing,
self.store,
&self.store,
)?);
} else {
self.remainder = Vec::from(chunck);
self.remainder = Vec::from(chunk);
return Ok(());
}
}
Ok(())
}
pub fn save(&mut self) -> Result<(), FileError> {
pub fn save(&mut self) -> Result<ObjectId, FileError> {
if self.id.is_some() {
return Err(FileError::AlreadySaved);
}
@ -568,7 +630,7 @@ impl<'a> RandomAccessFile<'a> {
&self.conv_key.unwrap(),
vec![],
&mut HashMap::new(),
self.store,
&self.store,
)?);
}
@ -580,7 +642,7 @@ impl<'a> RandomAccessFile<'a> {
&self.blocks,
&mut self.meta,
self.conv_key.as_ref().unwrap(),
self.store,
&self.store,
)?;
self.conv_key.as_mut().unwrap().zeroize();
@ -593,15 +655,26 @@ impl<'a> RandomAccessFile<'a> {
self.blocks = vec![];
self.blocks.shrink_to_fit();
Ok(())
Ok(root_block.0)
}
pub fn reference(&self) -> Option<ObjectRef> {
if self.key.is_some() && self.id.is_some() {
Some(ObjectRef::from_id_key(
self.id.unwrap(),
self.key.as_ref().unwrap().clone(),
))
} else {
None
}
}
/// Opens a file for read purpose.
pub fn open(
id: ObjectId,
key: SymKey,
store: &'a Store,
) -> Result<RandomAccessFile<'a>, FileError> {
store: Arc<Store>,
) -> Result<RandomAccessFile, FileError> {
// load root block
let root_block = store.get(&id)?;
@ -617,7 +690,7 @@ impl<'a> RandomAccessFile<'a> {
let meta_object = Object::load(
root_sub_blocks[0].0,
Some(root_sub_blocks[0].1.clone()),
store,
&store,
)?;
let meta = match meta_object.content_v0()? {
@ -629,7 +702,7 @@ impl<'a> RandomAccessFile<'a> {
store,
meta,
block_contents: HashMap::new(), // not used in this case
blocks: vec![], // not used in this case
blocks: vec![(id, SymKey::nil()), (root_sub_blocks[0].0, SymKey::nil())], // not used in this case
id: Some(id),
key: Some(key),
content_block: Some(root_sub_blocks[1].clone()),
@ -680,7 +753,7 @@ impl<'a> RandomAccessFile<'a> {
}
}
impl fmt::Display for RandomAccessFile<'_> {
impl fmt::Display for RandomAccessFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
@ -736,7 +809,7 @@ mod test {
block_size,
"text/plain".to_string(),
vec![],
&store,
Arc::clone(&store),
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -781,7 +854,7 @@ mod test {
assert_eq!(file.depth(), Ok(0));
assert_eq!(store.len(), Ok(3));
let file = RandomAccessFile::open(id, file.key.unwrap(), &store).expect("re open");
let file = RandomAccessFile::open(id, file.key.unwrap(), store).expect("re open");
log_debug!("{}", file);
@ -809,7 +882,7 @@ mod test {
store_max_value_size(),
"text/plain".to_string(),
vec![],
&store,
Arc::clone(&store),
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -842,7 +915,7 @@ mod test {
store_max_value_size(),
"text/plain".to_string(),
vec![],
&store,
Arc::clone(&store),
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -877,7 +950,7 @@ mod test {
store_valid_value_size(0),
"text/plain".to_string(),
vec![],
&store,
Arc::clone(&store),
)
.expect("new_from_slice");
log_debug!("{}", file);
@ -938,7 +1011,7 @@ mod test {
store_valid_value_size(0),
"text/plain".to_string(),
vec![],
&store,
Arc::clone(&store),
)
.expect("new_from_slice");
@ -971,7 +1044,7 @@ mod test {
store_max_value_size(), //store_valid_value_size(0),//
"image/jpeg".to_string(),
vec![],
&store,
store,
);
log_debug!("{}", file);
@ -1041,7 +1114,7 @@ mod test {
store_max_value_size(), //store_valid_value_size(0),//
"image/jpeg".to_string(),
vec![],
&store,
store,
);
log_debug!("{}", file);
@ -1113,7 +1186,7 @@ mod test {
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store,
store,
);
log_debug!("{}", file);
@ -1193,7 +1266,7 @@ mod test {
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store,
store,
);
log_debug!("{}", file);
@ -1271,7 +1344,7 @@ mod test {
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store,
Arc::clone(&store),
);
log_debug!("{}", file);
@ -1320,7 +1393,7 @@ mod test {
store_max_value_size(), //store_valid_value_size(0),//
"image/jpeg".to_string(),
vec![],
&store,
Arc::clone(&store),
);
log_debug!("{}", file);
@ -1331,7 +1404,7 @@ mod test {
file.save().expect("save");
let file2 = RandomAccessFile::open(file.id().unwrap(), file.key.unwrap(), &store)
let file2 = RandomAccessFile::open(file.id().unwrap(), file.key.unwrap(), store)
.expect("reopen file");
// this works only because store_max_value_size() is bigger than the actual size of the JPEG file, so it fits in one block.
@ -1378,7 +1451,7 @@ mod test {
let _ = obj.save_in_test(&store).expect("save");
let file = File::open(obj.id(), obj.key().unwrap(), &store).expect("open");
let file = File::open(obj.id(), obj.key().unwrap(), store).expect("open");
let res = file.read(0, len).expect("read all");
@ -1400,8 +1473,12 @@ mod test {
let store = Store::dummy_public_v0();
log_debug!("creating empty file");
let mut file: RandomAccessFile =
RandomAccessFile::new_empty(max_object_size, "image/jpeg".to_string(), vec![], &store);
let mut file: RandomAccessFile = RandomAccessFile::new_empty(
max_object_size,
"image/jpeg".to_string(),
vec![],
Arc::clone(&store),
);
file.write(&img_buffer).expect("write all");
@ -1414,7 +1491,7 @@ mod test {
let file = File::open(
file.id().unwrap(),
file.key().as_ref().unwrap().clone(),
&store,
store,
)
.expect("open");
@ -1440,7 +1517,7 @@ mod test {
store_valid_value_size(0),
"image/jpeg".to_string(),
vec![],
&store,
store,
);
log_debug!("{}", file);
@ -1489,7 +1566,7 @@ mod test {
store_max_value_size(),
"image/jpeg".to_string(),
vec![],
&store,
store,
);
log_debug!("{}", file);

@ -166,7 +166,10 @@ impl Repo {
pub fn update_branch_current_head(&mut self, branch: &BranchId, commit_ref: ObjectRef) {
//log_info!("from branch {} HEAD UPDATED TO {}", branch, commit_ref.id);
if let Some(branch) = self.branches.get_mut(branch) {
// FIXME: this is very wrong: the DAG is not always linear
branch.current_heads = vec![commit_ref];
//TODO: if userstorage: save current_heads to user storage
}
}
@ -203,6 +206,7 @@ impl Repo {
write_cap: None,
branches: HashMap::new(),
opened_branches: HashMap::new(),
//main_branch_rc: None,
}
}
@ -251,6 +255,15 @@ impl Repo {
None
}
pub fn main_branch(&self) -> Option<&BranchInfo> {
for (_, branch) in self.branches.iter() {
if branch.branch_type == BranchType::Main {
return Some(branch);
}
}
None
}
pub fn root_branch(&self) -> Option<&BranchInfo> {
for (_, branch) in self.branches.iter() {
if branch.branch_type == BranchType::Root {

@ -28,8 +28,8 @@ use rand::prelude::*;
use threshold_crypto::{SecretKeySet, SecretKeyShare};
pub struct Store {
//TODO: store_repo, store_readcap and store_overlay_branch_readcap could be empty, if we have only an outer access to the store. should be Options
store_repo: StoreRepo,
//TODO: store_readcap and store_overlay_branch_readcap could be empty, if we have only an outer access to the store. should be Options
store_readcap: ReadCap,
store_overlay_branch_readcap: ReadCap,
pub overlay_id: OverlayId,
@ -168,6 +168,13 @@ impl Store {
.len()
}
pub fn has(&self, id: &BlockId) -> Result<(), StorageError> {
self.storage
.read()
.map_err(|_| StorageError::BackendError)?
.has(&self.overlay_id, id)
}
/// returns the (branch_commit, add_branch_commit, branch_info)
fn create_branch(
&self,

@ -14,8 +14,9 @@
use crate::errors::NgError;
use crate::store::Store;
use crate::utils::{
decode_key, dh_pubkey_array_from_ed_pubkey_slice, dh_pubkey_from_ed_pubkey_slice,
ed_privkey_to_ed_pubkey, from_ed_privkey_to_dh_privkey, random_key,
decode_key, decode_priv_key, dh_pubkey_array_from_ed_pubkey_slice,
dh_pubkey_from_ed_pubkey_slice, ed_privkey_to_ed_pubkey, from_ed_privkey_to_dh_privkey,
random_key,
};
use core::fmt;
use once_cell::sync::OnceCell;
@ -45,9 +46,8 @@ impl Digest {
impl fmt::Display for Digest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Digest::Blake3Digest32(d) => write!(f, "{}", base64_url::encode(d)),
}
let ser = serde_bare::to_vec(&self).unwrap();
write!(f, "{}", base64_url::encode(&ser))
}
}
@ -103,9 +103,8 @@ impl SymKey {
impl fmt::Display for SymKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::ChaCha20Key(k) => write!(f, "{}", base64_url::encode(k)),
}
let ser = serde_bare::to_vec(&self).unwrap();
write!(f, "{}", base64_url::encode(&ser))
}
}
@ -144,6 +143,12 @@ impl Default for PubKey {
}
impl PubKey {
pub fn to_dh(self) -> X25519PubKey {
match self {
Self::X25519PubKey(x) => x,
_ => panic!("cannot call to_dh on an Edward key"),
}
}
pub fn slice(&self) -> &[u8; 32] {
match self {
PubKey::Ed25519PubKey(o) | PubKey::X25519PubKey(o) => o,
@ -172,26 +177,23 @@ impl PubKey {
}
pub fn to_hash_string(&self) -> String {
let hash = blake3::hash(self.slice());
let ser = serde_bare::to_vec(&self).unwrap();
let hash = blake3::hash(&ser);
base64_url::encode(&hash.as_bytes())
}
}
impl fmt::Display for PubKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PubKey::Ed25519PubKey(d) | PubKey::X25519PubKey(d) => {
write!(f, "{}", base64_url::encode(d))
}
}
let ser = serde_bare::to_vec(&self).unwrap();
write!(f, "{}", base64_url::encode(&ser))
}
}
impl TryFrom<&str> for PubKey {
type Error = NgError;
fn try_from(str: &str) -> Result<Self, NgError> {
let key = decode_key(str)?;
Ok(PubKey::Ed25519PubKey(key))
decode_key(str)
}
}
@ -260,23 +262,14 @@ impl TryFrom<&[u8]> for PrivKey {
impl TryFrom<&str> for PrivKey {
type Error = NgError;
fn try_from(str: &str) -> Result<Self, NgError> {
let key = decode_key(str)?;
Ok(PrivKey::Ed25519PrivKey(key))
decode_priv_key(str)
}
}
impl fmt::Display for PrivKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Ed25519PrivKey(ed) => {
//let priv_key_ser = serde_bare::to_vec(ed).unwrap();
let priv_key_encoded = base64_url::encode(ed);
write!(f, "{}", priv_key_encoded)
}
_ => {
unimplemented!();
}
}
let ser = serde_bare::to_vec(&self).unwrap();
write!(f, "{}", base64_url::encode(&ser))
}
}
@ -440,6 +433,10 @@ impl BlockRef {
pub fn from_id_key(id: BlockId, key: BlockKey) -> Self {
BlockRef { id, key }
}
pub fn nuri(&self) -> String {
format!(":j:{}:k:{}", self.id, self.key)
}
}
impl From<BlockRef> for (BlockId, BlockKey) {
@ -1849,6 +1846,25 @@ pub enum AddFile {
V0(AddFileV0),
}
impl fmt::Display for AddFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::V0(v0) => {
writeln!(f, "V0")?;
writeln!(f, "name: {:?}", v0.name)
}
}
}
}
impl AddFile {
pub fn name(&self) -> &Option<String> {
match self {
Self::V0(v0) => &v0.name,
}
}
}
/// Remove a file from the branch, using ORset CRDT logic
///
/// (removes the ref counting. not necessarily the file itself)

@ -53,9 +53,14 @@ pub fn from_ed_privkey_to_dh_privkey(private: &PrivKey) -> PrivKey {
}
/// don't forget to zeroize the string later on
pub fn decode_key(key_string: &str) -> Result<[u8; 32], NgError> {
pub fn decode_key(key_string: &str) -> Result<PubKey, NgError> {
let vec = base64_url::decode(key_string).map_err(|_| NgError::InvalidKey)?;
Ok(*slice_as_array!(&vec, [u8; 32]).ok_or(NgError::InvalidKey)?)
Ok(serde_bare::from_slice(&vec).map_err(|_| NgError::InvalidKey)?)
}
pub fn decode_priv_key(key_string: &str) -> Result<PrivKey, NgError> {
let vec = base64_url::decode(key_string).map_err(|_| NgError::InvalidKey)?;
Ok(serde_bare::from_slice(&vec).map_err(|_| NgError::InvalidKey)?)
}
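With this change, Display and TryFrom<&str> are symmetric over the full serde_bare serialization (variant tag included) instead of the raw 32 bytes, so older base64 strings of bare key bytes will generally no longer parse. A round-trip sketch, where `pubkey` is any PubKey value:
// Round-trip sketch: Display encodes base64-url of the serde_bare bytes,
// and decode_key parses the same representation back.
let encoded = pubkey.to_string();
let decoded = PubKey::try_from(encoded.as_str())?;
// `decoded` carries the same variant and bytes as `pubkey`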
pub fn ed_privkey_to_ed_pubkey(privkey: &PrivKey) -> PubKey {

@ -23,6 +23,7 @@ export function client_details2(obj,version) {
export function session_save(key,value) {
try {
sessionStorage.setItem(key, value);
} catch(e) {
@ -81,6 +82,16 @@ export function local_save(key,value) {
}
}
export function storage_clear() {
try {
localStorage.clear();
sessionStorage.clear();
} catch(e) {
console.error(e);
}
}
export function local_get(key) {
try {

@ -150,6 +150,10 @@ module.exports.session_save = function(key,value) {
}
module.exports.storage_clear = function() {
}
module.exports.session_get = function(key) {
}

@ -31,6 +31,7 @@ use ng_wallet::types::*;
use ng_wallet::*;
use nextgraph::local_broker::*;
use nextgraph::verifier::types::*;
use ng_net::WS_PORT;
use ng_repo::errors::NgError;
use ng_repo::log::*;
@ -228,6 +229,7 @@ extern "C" {
fn local_save(key: String, value: String) -> Option<String>;
fn local_get(key: String) -> Option<String>;
fn is_browser() -> bool;
fn storage_clear();
}
#[cfg(wasmpack_target = "nodejs")]
@ -239,6 +241,7 @@ extern "C" {
fn local_save(key: String, value: String) -> Option<String>;
fn local_get(key: String) -> Option<String>;
fn is_browser() -> bool;
fn storage_clear();
}
#[cfg(target_arch = "wasm32")]
@ -273,6 +276,11 @@ fn session_del(key: String) -> Result<(), NgError> {
Ok(())
}
#[cfg(target_arch = "wasm32")]
fn clear() {
storage_clear();
}
#[cfg(target_arch = "wasm32")]
static INIT_LOCAL_BROKER: Lazy<Box<ConfigInitFn>> = Lazy::new(|| {
Box::new(|| {
@ -282,6 +290,7 @@ static INIT_LOCAL_BROKER: Lazy<Box<ConfigInitFn>> = Lazy::new(|| {
session_read: Arc::new(Box::new(session_read)),
session_write: Arc::new(Box::new(session_write)),
session_del: Arc::new(Box::new(session_del)),
clear: Arc::new(Box::new(clear)),
is_browser: is_browser(),
})
})
@ -482,53 +491,35 @@ pub async fn test() {
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn doc_get_file_from_store_with_object_ref(
nuri: String,
obj_ref_js: JsValue,
) -> Result<JsValue, JsValue> {
let obj_ref = serde_wasm_bindgen::from_value::<ObjectRef>(obj_ref_js).unwrap();
log_debug!("doc_get_file {} {:?}", nuri, obj_ref.id,);
// let vec: Vec<u8> = vec![2; 10];
// let view = unsafe { Uint8Array::view(&vec) };
// let x = JsValue::from(Uint8Array::new(view.as_ref()));
pub async fn app_request_stream(
js_session_id: JsValue,
js_request: JsValue,
callback: &js_sys::Function,
) -> Result<JsValue, String> {
let session_id: u64 = serde_wasm_bindgen::from_value::<u64>(js_session_id)
.map_err(|_| "Deserialization error of session_id".to_string())?;
// let ret = ObjectContent::File(File::V0(FileV0 {
// content_type: "text/plain".to_string(),
// metadata: vec![],
// content: vec![45; 20],
// }));
let obj_content = BROKER
.write()
.await
.get_object_from_store_with_object_ref(nuri, obj_ref)
.await
.map_err(|e| e.to_string())?;
let mut request = serde_wasm_bindgen::from_value::<AppRequest>(js_request)
.map_err(|_| "Deserialization error of AppRequest".to_string())?;
Ok(serde_wasm_bindgen::to_value(&obj_content).unwrap())
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn doc_sync_branch(anuri: String, callback: &js_sys::Function) -> JsValue {
let vec: Vec<u8> = vec![2; 10];
let view = unsafe { Uint8Array::view(&vec) };
let x = JsValue::from(Uint8Array::new(view.as_ref()));
let mut reader;
let mut sender;
let mut cancel;
{
(reader, sender) = BROKER.write().await.doc_sync_branch(anuri.clone()).await;
(reader, cancel) = nextgraph::local_broker::app_request_stream(session_id, request)
.await
.map_err(|e: NgError| e.to_string())?;
}
async fn inner_task(
mut reader: Receiver<Commit>,
anuri: String,
mut reader: Receiver<AppResponse>,
callback: js_sys::Function,
) -> ResultSend<()> {
while let Some(commit) = reader.next().await {
let xx = serde_wasm_bindgen::to_value(&commit).unwrap();
while let Some(app_response) = reader.next().await {
let xx = serde_wasm_bindgen::to_value(&app_response).unwrap();
//let xx = JsValue::from(json!(commit).to_string());
//let _ = callback.call1(&this, &xx);
let this = JsValue::null();
@ -545,18 +536,86 @@ pub async fn doc_sync_branch(anuri: String, callback: &js_sys::Function) -> JsVa
Ok(())
}
spawn_and_log_error(inner_task(reader, anuri, callback.clone()));
spawn_and_log_error(inner_task(reader, callback.clone()));
let cb = Closure::once(move || {
log_debug!("close channel");
sender.close_channel()
log_info!("cancelling");
//sender.close_channel()
cancel();
});
//Closure::wrap(Box::new(move |sender| sender.close_channel()) as Box<FnMut(Sender<Commit>)>);
let ret = cb.as_ref().clone();
cb.forget();
return ret;
Ok(ret)
}
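For reference, the same streaming call from native Rust looks roughly like this (a sketch: `session_id` comes from a previously started session, and the response handling is elided):
// needs futures::StreamExt in scope for next()
let request = AppRequest::V0(AppRequestV0 {
    command: AppRequestCommandV0::Fetch(AppFetchContentV0::Subscribe),
    nuri: NuriV0::new_private_store_target(),
    payload: None,
});
let (mut rx, cancel) =
    nextgraph::local_broker::app_request_stream(session_id, request).await?;
while let Some(response) = rx.next().await {
    // handle the AppResponse::V0 variants (FileMeta, FileBinary, ...)
}
cancel(); // FnOnce that closes the stream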
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn app_request(js_session_id: JsValue, js_request: JsValue) -> Result<JsValue, String> {
let session_id: u64 = serde_wasm_bindgen::from_value::<u64>(js_session_id)
.map_err(|_| "Deserialization error of session_id".to_string())?;
let mut request = serde_wasm_bindgen::from_value::<AppRequest>(js_request)
.map_err(|_| "Deserialization error of AppRequest".to_string())?;
let response = nextgraph::local_broker::app_request(session_id, request)
.await
.map_err(|e: NgError| e.to_string())?;
Ok(serde_wasm_bindgen::to_value(&response).unwrap())
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn upload_chunk(
js_session_id: JsValue,
js_upload_id: JsValue,
js_chunk: JsValue,
js_nuri: JsValue,
) -> Result<JsValue, String> {
log_debug!("upload_chunk {:?}", js_nuri);
let session_id: u64 = serde_wasm_bindgen::from_value::<u64>(js_session_id)
.map_err(|_| "Deserialization error of session_id".to_string())?;
let upload_id: u32 = serde_wasm_bindgen::from_value::<u32>(js_upload_id)
.map_err(|_| "Deserialization error of upload_id".to_string())?;
let chunk: serde_bytes::ByteBuf =
serde_wasm_bindgen::from_value::<serde_bytes::ByteBuf>(js_chunk)
.map_err(|_| "Deserialization error of chunk".to_string())?;
let nuri: NuriV0 = serde_wasm_bindgen::from_value::<NuriV0>(js_nuri)
.map_err(|_| "Deserialization error of nuri".to_string())?;
let request = AppRequest::V0(AppRequestV0 {
command: AppRequestCommandV0::FilePut,
nuri,
payload: Some(AppRequestPayload::V0(
AppRequestPayloadV0::RandomAccessFilePutChunk((upload_id, chunk)),
)),
});
let response = nextgraph::local_broker::app_request(session_id, request)
.await
.map_err(|e: NgError| e.to_string())?;
Ok(serde_wasm_bindgen::to_value(&response).unwrap())
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn doc_fetch_private_subscribe() -> Result<JsValue, String> {
let request = AppRequest::V0(AppRequestV0 {
command: AppRequestCommandV0::Fetch(AppFetchContentV0::get_or_subscribe(true)),
nuri: NuriV0::new_private_store_target(),
payload: None,
});
Ok(serde_wasm_bindgen::to_value(&request).unwrap())
}
// #[cfg(target_arch = "wasm32")]
// #[wasm_bindgen]
// pub async fn get_readcap() -> Result<JsValue, String> {
// let request = ObjectRef::nil();
// Ok(serde_wasm_bindgen::to_value(&request).unwrap())
// }
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
pub async fn disconnections_subscribe(callback: &js_sys::Function) -> Result<JsValue, JsValue> {

@ -117,6 +117,15 @@ impl BlockStorage for RocksDbBlockStorage {
Ok(block)
}
fn has(&self, overlay: &OverlayId, id: &BlockId) -> Result<(), StorageError> {
let _block_ser = self
.db
.get(Self::compute_key(overlay, id))
.map_err(|_e| StorageError::BackendError)?
.ok_or(StorageError::NotFound)?;
Ok(())
}
/// Save a block to the storage.
fn put(&self, overlay: &OverlayId, block: &Block, lazy: bool) -> Result<BlockId, StorageError> {
// TODO? return an error if already present in blockstorage and !lazy ?

@ -35,6 +35,7 @@ rand = { version = "0.7", features = ["getrandom"] }
web-time = "0.2.0"
either = "1.8.1"
futures = "0.3.24"
async-trait = "0.1.64"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ng-storage-rocksdb = { path = "../ng-storage-rocksdb", version = "0.1.0" }

@ -9,6 +9,7 @@
//! Verifiers for each Commit type
use crate::types::*;
use crate::verifier::Verifier;
use ng_repo::errors::VerifierError;
use ng_repo::log::*;
@ -19,8 +20,9 @@ use ng_repo::types::*;
use std::collections::HashMap;
use std::sync::Arc;
#[async_trait::async_trait]
pub trait CommitVerifier {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -55,8 +57,9 @@ fn list_dep_chain_until(
Ok(res)
}
#[async_trait::async_trait]
impl CommitVerifier for RootBranch {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -126,9 +129,9 @@ impl CommitVerifier for RootBranch {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for Branch {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -181,9 +184,9 @@ impl CommitVerifier for Branch {
}
}
}
#[async_trait::async_trait]
impl CommitVerifier for SyncSignature {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -212,16 +215,18 @@ impl CommitVerifier for SyncSignature {
}
let commits = list_dep_chain_until(deps[0].clone(), &ack.id, &store)?;
for commit in commits {
verifier.verify_commit(commit, branch_id, repo_id, Arc::clone(&store))?;
verifier
.verify_commit(&commit, branch_id, repo_id, Arc::clone(&store))
.await?;
}
}
}
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddBranch {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -258,9 +263,9 @@ impl CommitVerifier for AddBranch {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for Repository {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -272,9 +277,9 @@ impl CommitVerifier for Repository {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for StoreUpdate {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -285,9 +290,9 @@ impl CommitVerifier for StoreUpdate {
verifier.new_store_from_update(self)
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddSignerCap {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -300,9 +305,9 @@ impl CommitVerifier for AddSignerCap {
}
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddMember {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -313,9 +318,9 @@ impl CommitVerifier for AddMember {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemoveMember {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -326,9 +331,9 @@ impl CommitVerifier for RemoveMember {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddPermission {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -339,9 +344,9 @@ impl CommitVerifier for AddPermission {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemovePermission {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -352,9 +357,9 @@ impl CommitVerifier for RemovePermission {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemoveBranch {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -365,9 +370,9 @@ impl CommitVerifier for RemoveBranch {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddName {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -378,9 +383,9 @@ impl CommitVerifier for AddName {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemoveName {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -391,9 +396,9 @@ impl CommitVerifier for RemoveName {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for () {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -404,9 +409,9 @@ impl CommitVerifier for () {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for Snapshot {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -417,9 +422,9 @@ impl CommitVerifier for Snapshot {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddFile {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -427,12 +432,33 @@ impl CommitVerifier for AddFile {
repo_id: &RepoId,
store: Arc<Store>,
) -> Result<(), VerifierError> {
Ok(())
let files = commit.files();
if files.len() == 1 {
let refe = commit.files().remove(0);
let filename = FileName {
heads: vec![], //TODO: put the current heads
name: self.name().clone(),
nuri: refe.nuri(),
reference: refe,
};
verifier
.user_storage
.as_ref()
.unwrap()
.branch_add_file(*branch_id, filename.clone())?;
verifier
.push_app_response(branch_id, AppResponse::V0(AppResponseV0::File(filename)))
.await;
Ok(())
} else {
Err(VerifierError::InvalidCommit)
}
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemoveFile {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -443,9 +469,9 @@ impl CommitVerifier for RemoveFile {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for Compact {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -456,9 +482,9 @@ impl CommitVerifier for Compact {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for AsyncSignature {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -469,9 +495,9 @@ impl CommitVerifier for AsyncSignature {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RootCapRefresh {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -482,9 +508,9 @@ impl CommitVerifier for RootCapRefresh {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for BranchCapRefresh {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -495,9 +521,9 @@ impl CommitVerifier for BranchCapRefresh {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddRepo {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -508,9 +534,9 @@ impl CommitVerifier for AddRepo {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemoveRepo {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -521,9 +547,9 @@ impl CommitVerifier for RemoveRepo {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for AddLink {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -534,9 +560,9 @@ impl CommitVerifier for AddLink {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemoveLink {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -547,9 +573,9 @@ impl CommitVerifier for RemoveLink {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for RemoveSignerCap {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,
@ -560,9 +586,9 @@ impl CommitVerifier for RemoveSignerCap {
Ok(())
}
}
#[async_trait::async_trait]
impl CommitVerifier for WalletUpdate {
fn verify(
async fn verify(
&self,
commit: &Commit,
verifier: &mut Verifier,

@ -8,5 +8,7 @@ pub mod site;
pub mod commits;
pub mod request_processor;
#[cfg(not(target_family = "wasm"))]
pub mod rocksdb_user_storage;

@ -0,0 +1,220 @@
// Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Processor for each type of AppRequest
use futures::channel::mpsc;
use futures::SinkExt;
use futures::StreamExt;
use ng_net::utils::ResultSend;
use std::sync::Arc;
use crate::types::*;
use crate::verifier::*;
use ng_net::utils::{spawn_and_log_error, Receiver, Sender};
use ng_repo::errors::*;
use ng_repo::file::{RandomAccessFile, ReadFile};
use ng_repo::types::BranchId;
use ng_repo::types::*;
use ng_repo::log::*;
use ng_repo::types::StoreRepo;
impl AppRequestCommandV0 {
pub(crate) async fn process_stream(
&self,
verifier: &mut Verifier,
nuri: &NuriV0,
payload: &Option<AppRequestPayload>,
) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
match self {
Self::Fetch(fetch) => match fetch {
AppFetchContentV0::Subscribe => {
let (_, branch_id, _) =
Self::open_for_target(verifier, &nuri.target, false).await?;
Ok(verifier.create_branch_subscription(branch_id).await?)
}
_ => unimplemented!(),
},
Self::FileGet => {
if nuri.access.len() < 1 || nuri.object.is_none() {
return Err(NgError::InvalidArgument);
}
let (repo_id, _, store_repo) = Self::resolve_target(verifier, &nuri.target)?;
let access = nuri.access.get(0).unwrap();
if let NgAccessV0::Key(key) = access {
let repo = verifier.get_repo(&repo_id, &store_repo)?;
let obj_id = nuri.object.unwrap();
if let Some(mut stream) = verifier
.fetch_blocks_if_needed(&obj_id, &repo_id, &store_repo)
.await?
{
// TODO: start opening the file and running the sending_loop after we have received 10 blocks (3 mandatory and 7 for the max depth).
// for files below 10MB we won't see a difference, but for big files, we can start sending out some AppResponse earlier.
while let Some(block) = stream.next().await {
repo.store.put(&block)?;
}
}
let file =
RandomAccessFile::open(obj_id, key.clone(), Arc::clone(&repo.store))?;
let (mut tx, rx) = mpsc::unbounded::<AppResponse>();
tx.send(AppResponse::V0(AppResponseV0::FileMeta(FileMetaV0 {
content_type: file.meta().content_type().clone(),
size: file.meta().total_size(),
})))
.await
.map_err(|_| NgError::InternalError)?;
async fn sending_loop(
file: Arc<RandomAccessFile>,
mut tx: Sender<AppResponse>,
) -> ResultSend<()> {
let mut pos = 0;
loop {
let res = file.read(pos, 1048564);
if res.is_err() {
//log_info!("ERR={:?}", res.unwrap_err());
let _ = tx
.send(AppResponse::V0(AppResponseV0::FileBinary(vec![])))
.await;
break;
}
let res = res.unwrap();
//log_info!("reading={} {}", pos, res.len());
pos += res.len();
if let Err(_) = tx
.send(AppResponse::V0(AppResponseV0::FileBinary(res)))
.await
{
break;
}
}
Ok(())
}
spawn_and_log_error(sending_loop(Arc::new(file), tx.clone()));
let fnonce = Box::new(move || {
tx.close_channel();
});
Ok((rx, fnonce))
} else {
return Err(NgError::InvalidArgument);
}
}
_ => unimplemented!(),
}
}
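The FileGet stream thus emits, in order: one FileMeta, then one FileBinary per read, and finally an empty FileBinary once the end of the file (or an error) is reached. A client-side consumption sketch, assuming `rx` was obtained from app_request_stream:
let mut content: Vec<u8> = vec![];
while let Some(AppResponse::V0(resp)) = rx.next().await {
    match resp {
        AppResponseV0::FileMeta(meta) => {
            // meta.content_type, meta.size
        }
        AppResponseV0::FileBinary(bin) => {
            if bin.is_empty() {
                break; // end-of-file (or read error) marker
            }
            content.extend_from_slice(&bin);
        }
        _ => {}
    }
}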
fn resolve_target(
verifier: &mut Verifier,
target: &NuriTargetV0,
) -> Result<(RepoId, BranchId, StoreRepo), NgError> {
match target {
NuriTargetV0::PrivateStore => {
let repo_id = verifier.config.private_store_id.unwrap();
let (branch, store_repo) = {
let repo = verifier.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
let branch = repo.main_branch().ok_or(NgError::BranchNotFound)?;
(branch.id, repo.store.get_store_repo().clone())
};
Ok((repo_id, branch, store_repo))
}
_ => unimplemented!(),
}
}
async fn open_for_target(
verifier: &mut Verifier,
target: &NuriTargetV0,
as_publisher: bool,
) -> Result<(RepoId, BranchId, StoreRepo), NgError> {
let (repo_id, branch, store_repo) = Self::resolve_target(verifier, target)?;
verifier
.open_branch(&repo_id, &branch, as_publisher)
.await?;
Ok((repo_id, branch, store_repo))
}
pub(crate) async fn process(
&self,
verifier: &mut Verifier,
nuri: NuriV0,
payload: Option<AppRequestPayload>,
) -> Result<AppResponse, NgError> {
match self {
Self::FilePut => match payload {
None => return Err(NgError::InvalidPayload),
Some(AppRequestPayload::V0(v0)) => match v0 {
AppRequestPayloadV0::AddFile(add) => {
let (repo_id, branch, store_repo) =
Self::open_for_target(verifier, &nuri.target, true).await?;
//log_info!("GOT ADD FILE {:?}", add);
let repo = verifier.get_repo(&repo_id, &store_repo)?;
// check that the referenced object exists locally.
repo.store.has(&add.object.id)?;
// we send all the blocks to the broker.
let file = RandomAccessFile::open(
add.object.id.clone(),
add.object.key.clone(),
Arc::clone(&repo.store),
)?;
let blocks = file.get_all_blocks_ids()?;
let found = verifier.has_blocks(blocks, repo).await?;
for block_id in found.missing() {
let block = repo.store.get(block_id)?;
verifier.put_blocks(vec![block], repo).await?;
}
let add_file_commit_body = CommitBodyV0::AddFile(AddFile::V0(AddFileV0 {
name: add.filename,
metadata: vec![],
}));
verifier
.new_commit(
add_file_commit_body,
&repo_id,
&branch,
&store_repo,
&vec![],
vec![],
vec![add.object],
)
.await?;
}
AppRequestPayloadV0::SmallFilePut(small) => {}
AppRequestPayloadV0::RandomAccessFilePut(content_type) => {
let (repo_id, _, store_repo) =
Self::resolve_target(verifier, &nuri.target)?;
let repo = verifier.get_repo(&repo_id, &store_repo)?;
let id = verifier.start_upload(content_type, Arc::clone(&repo.store));
return Ok(AppResponse::V0(AppResponseV0::FileUploading(id)));
}
AppRequestPayloadV0::RandomAccessFilePutChunk((id, chunk)) => {
if chunk.len() > 0 {
verifier.continue_upload(id, &chunk)?;
} else {
let reference = verifier.finish_upload(id)?;
return Ok(AppResponse::V0(AppResponseV0::FileUploaded(reference)));
}
}
_ => return Err(NgError::InvalidPayload),
},
},
_ => unimplemented!(),
}
Ok(AppResponse::V0(AppResponseV0::Ok))
}
}
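
// A hedged sketch (not in this commit) of the chunked-upload flow that process()
// implements above: RandomAccessFilePut opens the upload, each non-empty
// RandomAccessFilePutChunk appends data, and an empty chunk finishes it and
// yields the ObjectRef. `upload_bytes` and the 1 MiB chunk size are illustrative.
async fn upload_bytes(
    cmd: &AppRequestCommandV0, // expected: AppRequestCommandV0::FilePut
    verifier: &mut Verifier,
    nuri: NuriV0,
    content_type: String,
    bytes: &[u8],
) -> Result<ObjectRef, NgError> {
    // 1) open the upload and obtain its id
    let open = AppRequestPayload::V0(AppRequestPayloadV0::RandomAccessFilePut(content_type));
    let id = match cmd.process(verifier, nuri.clone(), Some(open)).await? {
        AppResponse::V0(AppResponseV0::FileUploading(id)) => id,
        _ => return Err(NgError::InvalidResponse),
    };
    // 2) push the content, one chunk at a time
    for chunk in bytes.chunks(1_048_576) {
        let put = AppRequestPayload::V0(AppRequestPayloadV0::RandomAccessFilePutChunk((
            id,
            serde_bytes::ByteBuf::from(chunk.to_vec()),
        )));
        cmd.process(verifier, nuri.clone(), Some(put)).await?;
    }
    // 3) an empty chunk ends the upload and returns the reference
    let end = AppRequestPayload::V0(AppRequestPayloadV0::RandomAccessFilePutChunk((
        id,
        serde_bytes::ByteBuf::new(),
    )));
    match cmd.process(verifier, nuri, Some(end)).await? {
        AppResponse::V0(AppResponseV0::FileUploaded(reference)) => Ok(reference),
        _ => Err(NgError::InvalidResponse),
    }
}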

@ -49,11 +49,11 @@ impl UserStorage for RocksDbUserStorage {
fn load_store(
&self,
repo_store: &StoreRepo,
store_repo: &StoreRepo,
block_storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
) -> Result<Repo, StorageError> {
RepoStorage::load(
repo_store.repo_id(),
store_repo.repo_id(),
Right(block_storage),
&self.user_storage,
)
@ -75,4 +75,11 @@ impl UserStorage for RocksDbUserStorage {
fn update_signer_cap(&self, signer_cap: &SignerCap) -> Result<(), StorageError> {
RepoStorage::update_signer_cap(signer_cap, &self.user_storage)
}
fn branch_add_file(&self, branch: BranchId, file: FileName) -> Result<(), StorageError> {
todo!();
}
fn branch_get_all_files(&self, branch: &BranchId) -> Result<Vec<FileName>, StorageError> {
todo!();
}
}

@ -50,6 +50,13 @@ impl SiteV0 {
}
}
pub fn get_individual_site_private_store_read_cap(&self) -> Option<ReadCap> {
match &self.site_type {
SiteType::Individual((_, read_cap)) => Some(read_cap.clone()),
_ => None,
}
}
fn site_store_to_store_repo(site_store: &SiteStore) -> StoreRepo {
StoreRepo::V0(match site_store.store_type {
SiteStoreType::Public => StoreRepoV0::PublicStore(site_store.id),
@ -237,7 +244,12 @@ impl SiteV0 {
user_priv_key: PrivKey,
verifier: &mut Verifier,
) -> Result<Self, NgError> {
Self::create_individual_(user_priv_key, verifier, SiteName::Personal).await
let site = Self::create_individual_(user_priv_key, verifier, SiteName::Personal).await?;
verifier.config.private_store_read_cap = site.get_individual_site_private_store_read_cap();
verifier.config.private_store_id = Some(site.private.id);
verifier.config.protected_store_id = Some(site.protected.id);
verifier.config.public_store_id = Some(site.public.id);
Ok(site)
}
pub async fn create_org(name: String) -> Result<Self, NgError> {

@ -150,9 +150,11 @@ pub struct VerifierConfig {
pub user_priv_key: PrivKey,
pub private_store_read_cap: Option<ObjectRef>,
pub private_store_id: Option<RepoId>,
pub public_store_id: Option<RepoId>,
pub protected_store_id: Option<RepoId>,
}
pub type CancelFn = Box<dyn FnOnce()>;
pub type CancelFn = Box<dyn FnOnce() + Sync + Send>;
//
// APP PROTOCOL (between APP and VERIFIER)
@ -160,29 +162,97 @@ pub type CancelFn = Box<dyn FnOnce()>;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppFetchContentV0 {
    Get,       // does not subscribe. more to be detailed
    Subscribe, // more to be detailed
    Update,
    ReadQuery,  // more to be detailed
    WriteQuery, // more to be detailed
    //Invoke,
}
impl AppFetchContentV0 {
pub fn get_or_subscribe(subscribe: bool) -> Self {
if subscribe {
AppFetchContentV0::Subscribe
} else {
AppFetchContentV0::Get
}
}
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum NgAccessV0 {
ReadCap(ReadCap),
Token(Digest),
#[serde(with = "serde_bytes")]
ExtRequest(Vec<u8>),
Key(BlockKey),
Inbox(Digest),
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum TargetBranchV0 {
Chat,
Stream,
Context,
Ontology,
BranchId(BranchId),
Named(String), // branch or commit
Commits(Vec<ObjectId>), // only possible if access to their branch is given. must belong to the same branch.
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AppFetchV0 {
    pub doc_id: RepoId,
    pub branch_id: Option<BranchId>,
    pub store: StoreRepo,
    pub content: AppFetchContentV0,
}

pub enum NuriTargetV0 {
    UserSite, // targets the whole data set of the user
    PublicStore,
    ProtectedStore,
    PrivateStore,
    AllDialogs,
    Dialog(String), // shortname of a Dialog
    AllGroups,
    Group(String), // shortname of a Group
    Repo(RepoId),
    Identity(UserId),
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppRequestContentV0 {
FetchNuri,
Fetch(AppFetchV0),
pub struct NuriV0 {
pub target: NuriTargetV0,
pub entire_store: bool, // If it is a store, will (try to) include all the docs belonging to the store
pub object: Option<ObjectId>, // used only for FileGet. // cannot be used for queries. only to download an object (file,commit..)
pub branch: Option<TargetBranchV0>, // if None, the main branch is chosen
pub overlay: Option<OverlayLink>,
pub access: Vec<NgAccessV0>,
pub topic: Option<TopicId>,
pub locator: Vec<PeerAdvert>,
}
impl NuriV0 {
pub fn new_private_store_target() -> Self {
Self {
target: NuriTargetV0::PrivateStore,
entire_store: false,
object: None,
branch: None,
overlay: None,
access: vec![],
topic: None,
locator: vec![],
}
}
pub fn new(from: String) -> Self {
unimplemented!();
}
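
    // Illustrative only (not in this commit): the shape of a NuriV0 that FileGet
    // accepts, i.e. `object` set to the file's id and a Key access entry so the
    // verifier can decrypt its blocks; the method name is a made-up example.
    pub fn new_private_store_file_target(obj: ObjectRef) -> Self {
        let mut nuri = Self::new_private_store_target();
        nuri.object = Some(obj.id);
        nuri.access = vec![NgAccessV0::Key(obj.key)];
        nuri
    }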
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum AppRequestCommandV0 {
Fetch(AppFetchContentV0),
Pin,
UnPin,
Delete,
@ -193,9 +263,9 @@ pub enum AppRequestContentV0 {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct AppRequestV0 {
    pub nuri: Option<String>,
    pub content: AppRequestContentV0,
    pub command: AppRequestCommandV0,
    pub nuri: NuriV0,
pub payload: Option<AppRequestPayload>,
}
@ -237,6 +307,12 @@ pub struct DocUpdate {
discrete: Option<DiscreteUpdate>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DocAddFile {
pub filename: Option<String>,
pub object: ObjectRef,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct DocCreate {
store: StoreRepo,
@ -254,11 +330,13 @@ pub enum AppRequestPayloadV0 {
Create(DocCreate),
Query(DocQuery),
Update(DocUpdate),
AddFile(DocAddFile),
//RemoveFile
Delete(DocDelete),
//Invoke(InvokeArguments),
SmallFilePut(SmallFile),
RandomAccessFilePut(String), // content_type
RandomAccessFilePutChunk((ObjectId, serde_bytes::ByteBuf)), // end the upload with an empty vec
RandomAccessFilePut(String), // content_type
RandomAccessFilePutChunk((u32, serde_bytes::ByteBuf)), // end the upload with an empty vec
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -321,8 +399,16 @@ pub struct AppPatch {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FileName {
name: Option<String>,
reference: ObjectRef,
pub heads: Vec<ObjectId>,
pub name: Option<String>,
pub reference: ObjectRef,
pub nuri: String,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FileMetaV0 {
pub content_type: String,
pub size: u64,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
@ -331,9 +417,13 @@ pub enum AppResponseV0 {
Patch(AppPatch),
Text(String),
File(FileName),
FileUploading(u32),
FileUploaded(ObjectRef),
#[serde(with = "serde_bytes")]
FileBinary(Vec<u8>),
FileMeta(FileMetaV0),
QueryResult, // see sparesults
Ok,
}
#[derive(Clone, Debug, Serialize, Deserialize)]

@ -32,7 +32,7 @@ pub trait UserStorage: Send + Sync {
fn load_store(
&self,
repo_store: &StoreRepo,
store_repo: &StoreRepo,
block_storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
) -> Result<Repo, StorageError>;
@ -43,53 +43,65 @@ pub trait UserStorage: Send + Sync {
fn add_branch(&self, repo_id: &RepoId, branch_info: &BranchInfo) -> Result<(), StorageError>;
fn update_signer_cap(&self, signer_cap: &SignerCap) -> Result<(), StorageError>;
fn branch_add_file(&self, branch: BranchId, file: FileName) -> Result<(), StorageError>;
fn branch_get_all_files(&self, branch: &BranchId) -> Result<Vec<FileName>, StorageError>;
}
pub(crate) struct InMemoryUserStorage {
branch_files: RwLock<HashMap<BranchId, Vec<FileName>>>,
}
impl InMemoryUserStorage {
pub fn new() -> Self {
InMemoryUserStorage {
branch_files: RwLock::new(HashMap::new()),
}
}
}
// pub(crate) struct InMemoryUserStorage {
// repo_id_to_store_overlay: HashMap<RepoId, StoreOverlay>,
// }
// impl InMemoryUserStorage {
// pub fn new() -> Self {
// InMemoryUserStorage {
// repo_id_to_store_overlay: HashMap::new(),
// }
// }
// }
// impl UserStorage for InMemoryUserStorage {
// fn repo_id_to_store_overlay(&self, id: &RepoId) -> Result<StoreOverlay, StorageError> {
// Ok(self
// .repo_id_to_store_overlay
// .get(&id)
// .ok_or(StorageError::NotFound)?
// .to_owned())
// }
// fn get_all_store_and_repo_ids(&self) -> Result<HashMap<StoreRepo, Vec<RepoId>>, StorageError> {
// unimplemented!();
// }
// fn load_store(
// &self,
// repo_store: &StoreRepo,
// block_storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
// ) -> Result<Repo, StorageError> {
// unimplemented!();
// }
// fn load_repo(&self, repo_id: &RepoId, store: Arc<Store>) -> Result<Repo, StorageError> {
// unimplemented!();
// }
// fn save_repo(&self, repo: &Repo) -> Result<(), StorageError> {
// unimplemented!();
// }
// fn add_branch(&self, repo_id: &RepoId, branch_info: &BranchInfo) -> Result<(), StorageError> {
// unimplemented!();
// }
// fn update_signer_cap(&self, signer_cap: &SignerCap) -> Result<(), StorageError> {
// unimplemented!();
// }
// }
impl UserStorage for InMemoryUserStorage {
fn branch_add_file(&self, branch: BranchId, file: FileName) -> Result<(), StorageError> {
let mut lock = self.branch_files.write().unwrap();
let file_list = lock.entry(branch).or_insert_with(|| Vec::with_capacity(1));
file_list.push(file);
Ok(())
}
fn branch_get_all_files(&self, branch: &BranchId) -> Result<Vec<FileName>, StorageError> {
let lock = self.branch_files.read().unwrap();
        if let Some(file_list) = lock.get(branch) {
Ok(file_list.to_vec())
} else {
Ok(vec![])
}
}
fn get_all_store_and_repo_ids(&self) -> Result<HashMap<StoreRepo, Vec<RepoId>>, StorageError> {
unimplemented!();
}
fn load_store(
&self,
store_repo: &StoreRepo,
block_storage: Arc<RwLock<dyn BlockStorage + Send + Sync>>,
) -> Result<Repo, StorageError> {
unimplemented!();
}
fn load_repo(&self, repo_id: &RepoId, store: Arc<Store>) -> Result<Repo, StorageError> {
unimplemented!();
}
fn save_repo(&self, repo: &Repo) -> Result<(), StorageError> {
unimplemented!();
}
fn add_branch(&self, repo_id: &RepoId, branch_info: &BranchInfo) -> Result<(), StorageError> {
unimplemented!();
}
fn update_signer_cap(&self, signer_cap: &SignerCap) -> Result<(), StorageError> {
unimplemented!();
}
}

@ -10,9 +10,13 @@
//! Repo object (on heap) to handle a Repository
use crate::commits::*;
use crate::types::*;
use crate::user_storage::InMemoryUserStorage;
use async_std::stream::StreamExt;
use futures::channel::mpsc;
use futures::SinkExt;
use ng_net::actor::SoS;
use ng_net::broker::{Broker, BROKER};
use ng_repo::block_storage::store_max_value_size;
use ng_repo::log::*;
use ng_repo::object::Object;
use ng_repo::repo::BranchInfo;
@ -26,6 +30,8 @@ use ng_repo::{
utils::{generate_keypair, sign},
};
use std::cmp::max;
use std::collections::BTreeMap;
use std::collections::HashSet;
use std::fs::{create_dir_all, read, write, File, OpenOptions};
use std::io::Write;
@ -66,14 +72,15 @@ pub struct Verifier {
pub config: VerifierConfig,
pub connected_server_id: Option<PubKey>,
graph_dataset: Option<oxigraph::store::Store>,
user_storage: Option<Arc<Box<dyn UserStorage>>>,
pub(crate) user_storage: Option<Arc<Box<dyn UserStorage>>>,
block_storage: Option<Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>>,
last_seq_num: u64,
peer_id: PubKey,
max_reserved_seq_num: u64,
last_reservation: SystemTime,
stores: HashMap<OverlayId, Arc<Store>>,
repos: HashMap<RepoId, Repo>,
inner_to_outer: HashMap<OverlayId, OverlayId>,
pub(crate) repos: HashMap<RepoId, Repo>,
// TODO: deal with collided repo_ids. self.repos should be a HashMap<RepoId,Collision> enum Collision {Yes, No(Repo)}
// add a collided_repos: HashMap<(OverlayId, RepoId), Repo>
// only use get_repo() everywhere in the code (always passing the overlay) so that collisions can be handled.
@ -82,6 +89,8 @@ pub struct Verifier {
pub(crate) topics: HashMap<(OverlayId, TopicId), (RepoId, BranchId)>,
/// only used for InMemory type, to store the outbox
in_memory_outbox: Vec<EventOutboxStorage>,
uploads: BTreeMap<u32, RandomAccessFile>,
branch_subscriptions: HashMap<BranchId, Sender<AppResponse>>,
}
impl fmt::Debug for Verifier {
@ -104,10 +113,97 @@ impl Verifier {
&self.config.user_priv_key
}
pub(crate) fn start_upload(&mut self, content_type: String, store: Arc<Store>) -> u32 {
let mut first_available: u32 = 0;
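        // `uploads` is a BTreeMap, so keys() iterates in ascending order: walk the
        // contiguous ids from 1 and stop at the first gap. E.g. with existing ids
        // {1, 2, 4} the loop stops at 4 and first_available ends up as 3.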
for upload in self.uploads.keys() {
if *upload != first_available + 1 {
break;
} else {
first_available += 1;
}
}
first_available += 1;
let ret = self.uploads.insert(
first_available,
RandomAccessFile::new_empty(store_max_value_size(), content_type, vec![], store),
);
assert!(ret.is_none());
first_available
}
pub(crate) fn continue_upload(
&mut self,
upload_id: u32,
data: &Vec<u8>,
) -> Result<(), NgError> {
let file = self
.uploads
.get_mut(&upload_id)
.ok_or(NgError::WrongUploadId)?;
Ok(file.write(data)?)
}
pub(crate) fn finish_upload(&mut self, upload_id: u32) -> Result<ObjectRef, NgError> {
let mut file = self
.uploads
.remove(&upload_id)
.ok_or(NgError::WrongUploadId)?;
let id = file.save()?;
Ok(file.reference().unwrap())
}
pub(crate) async fn push_app_response(&mut self, branch: &BranchId, response: AppResponse) {
// log_info!(
// "push_app_response {} {:?}",
// branch,
// self.branch_subscriptions
// );
if let Some(sender) = self.branch_subscriptions.get_mut(branch) {
let _ = sender.send(response).await;
}
}
pub(crate) async fn create_branch_subscription(
&mut self,
branch: BranchId,
) -> Result<(Receiver<AppResponse>, CancelFn), VerifierError> {
// async fn send(mut tx: Sender<AppResponse>, msg: AppResponse) -> ResultSend<()> {
// while let Ok(_) = tx.send(msg.clone()).await {
// log_debug!("sending AppResponse");
// sleep!(std::time::Duration::from_secs(3));
// }
// log_debug!("end of sending");
// Ok(())
// }
// spawn_and_log_error(send(tx.clone(), commit));
//log_info!("#### create_branch_subscription {}", branch);
let (tx, rx) = mpsc::unbounded::<AppResponse>();
if let Some(returned) = self.branch_subscriptions.insert(branch, tx.clone()) {
if !returned.is_closed() {
return Err(VerifierError::DoubleBranchSubscription);
}
}
//let tx = self.branch_subscriptions.entry(branch).or_insert_with(|| {});
for file in self
.user_storage
.as_ref()
.unwrap()
.branch_get_all_files(&branch)?
{
self.push_app_response(&branch, AppResponse::V0(AppResponseV0::File(file)))
.await;
}
let fnonce = Box::new(move || {
tx.close_channel();
});
Ok((rx, fnonce))
}
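
    // A hedged usage sketch (not in this commit): a Subscribe fetch reaches
    // create_branch_subscription() above via app_request_stream(); the stream
    // first replays the branch's stored FileNames, then pushes live responses.
    // `subscribe_private_store` is an illustrative name.
    pub async fn subscribe_private_store(
        &mut self,
    ) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
        let req = AppRequest::V0(AppRequestV0 {
            command: AppRequestCommandV0::Fetch(AppFetchContentV0::Subscribe),
            nuri: NuriV0::new_private_store_target(),
            payload: None,
        });
        self.app_request_stream(req).await
    }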
#[allow(deprecated)]
#[cfg(any(test, feature = "testing"))]
pub fn new_dummy() -> Self {
use ng_repo::block_storage::HashMapBlockStorage;
let (peer_priv_key, peer_id) = generate_keypair();
let block_storage = Arc::new(std::sync::RwLock::new(HashMapBlockStorage::new()))
as Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>;
@ -119,6 +215,8 @@ impl Verifier {
user_priv_key: PrivKey::random_ed(),
private_store_read_cap: None,
private_store_id: None,
protected_store_id: None,
public_store_id: None,
},
connected_server_id: None,
graph_dataset: None,
@ -132,6 +230,9 @@ impl Verifier {
repos: HashMap::new(),
topics: HashMap::new(),
in_memory_outbox: vec![],
inner_to_outer: HashMap::new(),
uploads: BTreeMap::new(),
branch_subscriptions: HashMap::new(),
}
}
@ -406,6 +507,65 @@ impl Verifier {
Ok(self.last_seq_num)
}
pub(crate) async fn new_commit(
&mut self,
commit_body: CommitBodyV0,
repo_id: &RepoId,
branch_id: &BranchId,
store_repo: &StoreRepo,
additional_blocks: &Vec<BlockId>,
deps: Vec<ObjectRef>,
files: Vec<ObjectRef>,
) -> Result<(), NgError> {
let commit = {
let repo = self.get_repo(repo_id, &store_repo)?;
let branch = repo.branch(branch_id)?;
let commit = Commit::new_with_body_and_save(
self.user_privkey(),
&self.user_privkey().to_pub(),
*branch_id,
QuorumType::NoSigning,
deps,
vec![],
branch.current_heads.clone(),
vec![],
files,
vec![],
vec![],
CommitBody::V0(commit_body),
0,
&repo.store,
)?;
self.verify_commit(&commit, branch_id, repo_id, Arc::clone(&repo.store))
.await?;
commit
};
//log_info!("{}", commit);
self.new_event(&commit, additional_blocks, *repo_id, store_repo)
.await
}
pub(crate) async fn new_commit_simple(
&mut self,
commit_body: CommitBodyV0,
repo_id: &RepoId,
branch_id: &BranchId,
store_repo: &StoreRepo,
additional_blocks: &Vec<BlockId>,
) -> Result<(), NgError> {
self.new_commit(
commit_body,
repo_id,
branch_id,
store_repo,
additional_blocks,
vec![],
vec![],
)
.await
}
pub(crate) async fn new_events_with_repo(
&mut self,
events: Vec<(Commit, Vec<Digest>)>,
@ -530,7 +690,9 @@ impl Verifier {
} else {
match &self.config.config_type {
VerifierConfigType::JsSaveSession(js) => {
//log_info!("========== SAVING EVENT {:03}", event.seq_num());
let e = EventOutboxStorage { event, overlay };
(js.outbox_write_function)(
self.peer_id,
e.event.seq_num(),
@ -567,26 +729,90 @@ impl Verifier {
Ok(())
}
    pub(crate) async fn open_branch<'a>(
        &mut self,
        repo_id: &RepoId,
        branch: &BranchId,
        as_publisher: bool,
    ) -> Result<(), NgError> {
let user = self.config.user_priv_key.to_pub();
let remote = self
.connected_server_id
.as_ref()
.ok_or(NgError::NotConnected)?
.clone();
self.open_branch_(
repo_id,
branch,
as_publisher,
&BROKER.read().await,
&user,
&remote,
)
.await
}
pub(crate) async fn put_blocks(&self, blocks: Vec<Block>, repo: &Repo) -> Result<(), NgError> {
let overlay = repo.store.overlay_for_read_on_client_protocol();
let broker = BROKER.read().await;
let user = self.config.user_priv_key.to_pub();
let remote = self.connected_server_id.to_owned().unwrap();
let msg = BlocksPut::V0(BlocksPutV0 {
blocks,
overlay: Some(overlay),
});
broker.request::<BlocksPut, ()>(&user, &remote, msg).await?;
Ok(())
}
pub(crate) async fn has_blocks(
&self,
blocks: Vec<BlockId>,
repo: &Repo,
) -> Result<BlocksFound, NgError> {
let overlay = repo.store.overlay_for_read_on_client_protocol();
let broker = BROKER.read().await;
let user = self.config.user_priv_key.to_pub();
let remote = self.connected_server_id.to_owned().unwrap();
let msg = BlocksExist::V0(BlocksExistV0 {
blocks,
overlay: Some(overlay),
});
if let SoS::Single(found) = broker
.request::<BlocksExist, BlocksFound>(&user, &remote, msg)
.await?
{
Ok(found)
} else {
Err(NgError::InvalidResponse)
}
}
    async fn open_branch_<'a>(
        &mut self,
        repo_id: &RepoId,
        branch: &BranchId,
        as_publisher: bool,
        broker: &RwLockReadGuard<'a, Broker<'a>>,
        user: &UserId,
        remote: &DirectPeerId,
    ) -> Result<(), NgError> {
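        // need_open: no branch of this repo is opened yet, so the repo itself must
        // first be pinned/opened on the broker.
        // need_sub: the branch is not subscribed yet, or it is subscribed read-only
        // while publisher rights are now requested.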
let (need_open, mut need_sub, overlay) = {
let repo = self.repos.get(repo_id).ok_or(NgError::RepoNotFound)?;
let overlay = repo.store.overlay_for_read_on_client_protocol();
match repo.opened_branches.get(branch) {
Some(val) => (false, as_publisher && !val, overlay),
None => (repo.opened_branches.len() == 0, true, overlay),
}
};
//log_info!("need_open {} need_sub {}", need_open, need_sub);
if need_open {
// TODO: implement OpenRepo. for now we always do a Pinning because OpenRepo is not implemented on the broker.
let msg = RepoPinStatusReq::V0(RepoPinStatusReqV0 {
hash: repo_id.into(),
overlay: Some(overlay),
@ -598,18 +824,35 @@ impl Verifier {
Err(NgError::ServerError(ServerError::False))
| Err(NgError::ServerError(ServerError::RepoAlreadyOpened)) => {
// pinning the repo on the server broker
let pin_req;
{
let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
pin_req = PinRepo::from_repo(repo, remote);
}
let (pin_req, topic_id) = {
let repo = self.repos.get(repo_id).ok_or(NgError::RepoNotFound)?;
let topic_id = repo.branch(branch).unwrap().topic;
//TODO: only pin the requested branch.
let pin_req = PinRepo::from_repo(repo, remote);
(pin_req, topic_id)
};
match broker
.request::<PinRepo, RepoOpened>(user, remote, pin_req)
.await
{
Ok(SoS::Single(opened)) => {
self.repo_was_opened(&repo_id, &opened)?;
self.repo_was_opened(repo_id, &opened)?;
//TODO: check that in the returned opened_repo, the branch we are interested in has effectively been subscribed as publisher by the broker.
for topic in opened {
if topic.topic_id() == &topic_id {
self.do_sync_req_if_needed(
broker,
user,
remote,
branch,
repo_id,
topic.known_heads(),
)
.await?;
}
}
}
Ok(_) => return Err(NgError::InvalidResponse),
Err(e) => return Err(e),
@ -619,39 +862,83 @@ impl Verifier {
Ok(SoS::Single(pin_status)) => {
// checking that the branch is subscribed as publisher
if !pin_status.is_topic_subscribed_as_publisher(event.topic_id()) {
// we need to subscribe as publisher
let topic_sub;
{
let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
let branch_info = repo.branch(&branch_id)?;
if branch_info.topic_priv_key.is_none() {
return Err(NgError::PermissionDenied);
}
topic_sub = TopicSub::new(repo, branch_info, Some(remote));
}
match broker
.request::<TopicSub, TopicSubRes>(user, remote, topic_sub)
.await
{
Ok(SoS::Single(sub)) => {
// TODO, deal with heads
let repo =
self.repos.get_mut(&repo_id).ok_or(NgError::RepoNotFound)?;
Self::branch_was_opened(&self.topics, repo, &sub)?;
}
Ok(_) => return Err(NgError::InvalidResponse),
Err(e) => {
return Err(e);
}
}
let repo = self.repos.get(repo_id).ok_or(NgError::RepoNotFound)?;
let branch_info = repo.branch(branch)?;
let topic_id = &branch_info.topic;
// log_info!(
// "as_publisher {} {}",
// as_publisher,
// pin_status.is_topic_subscribed_as_publisher(topic_id)
// );
if as_publisher && !pin_status.is_topic_subscribed_as_publisher(topic_id) {
need_sub = true;
}
}
_ => return Err(NgError::InvalidResponse),
}
// TODO: deal with received known_heads.
// TODO a TopicSync
}
if need_sub {
// we subscribe
let repo = self.repos.get(repo_id).ok_or(NgError::RepoNotFound)?;
let branch_info = repo.branch(branch)?;
let broker_id = if as_publisher {
if branch_info.topic_priv_key.is_none() {
// we need to subscribe as publisher, but we cant
return Err(NgError::PermissionDenied);
}
Some(remote)
} else {
None
};
let topic_sub = TopicSub::new(repo, branch_info, broker_id);
match broker
.request::<TopicSub, TopicSubRes>(user, remote, topic_sub)
.await
{
Ok(SoS::Single(sub)) => {
let repo = self.repos.get_mut(&repo_id).ok_or(NgError::RepoNotFound)?;
Self::branch_was_opened(&self.topics, repo, &sub)?;
self.do_sync_req_if_needed(
broker,
user,
remote,
branch,
repo_id,
sub.known_heads(),
)
.await?;
}
Ok(_) => return Err(NgError::InvalidResponse),
Err(e) => {
return Err(e);
}
}
}
Ok(())
}
async fn send_event<'a>(
&mut self,
event: Event,
broker: &RwLockReadGuard<'a, Broker<'a>>,
user: &UserId,
remote: &DirectPeerId,
overlay: OverlayId,
) -> Result<(), NgError> {
assert!(overlay.is_inner());
let (repo_id, branch_id) = self
.topics
.get(&(overlay, *event.topic_id()))
.ok_or(NgError::TopicNotFound)?
.to_owned();
self.open_branch_(&repo_id, &branch_id, true, broker, user, remote)
.await?;
let _ = broker
.request::<PublishEvent, ()>(user, remote, PublishEvent::new(event, overlay))
@ -660,11 +947,48 @@ impl Verifier {
Ok(())
}
pub fn deliver(&mut self, event: Event) {}
pub async fn deliver(&mut self, event: Event, overlay: OverlayId) {
let event_str = event.to_string();
if let Err(e) = self.deliver_(event, overlay).await {
log_err!("DELIVERY ERROR {} {}", e, event_str);
}
}
async fn deliver_(&mut self, event: Event, overlay: OverlayId) -> Result<(), NgError> {
let (repo_id, branch_id) = self
.topics
.get(&(overlay, *event.topic_id()))
.ok_or(NgError::TopicNotFound)?
.to_owned();
// let outer = self
// .inner_to_outer
// .get(&overlay)
// .ok_or(VerifierError::OverlayNotFound)?;
// let store = self
// .stores
// .get(outer)
// .ok_or(VerifierError::OverlayNotFound)?;
let repo = self
.repos
.get(&repo_id)
.ok_or(VerifierError::RepoNotFound)?;
repo.branch_is_opened(&branch_id)
.then_some(true)
.ok_or(VerifierError::BranchNotOpened)?;
let branch = repo.branch(&branch_id)?;
let commit = event.open(&repo.store, &repo_id, &branch_id, &branch.read_cap.key)?;
self.verify_commit(&commit, &branch_id, &repo_id, Arc::clone(&repo.store))
.await?;
Ok(())
}
pub fn verify_commit(
pub async fn verify_commit(
&mut self,
commit: Commit,
commit: &Commit,
branch_id: &BranchId,
repo_id: &RepoId,
store: Arc<Store>,
@ -676,23 +1000,26 @@ impl Verifier {
// commit,
// store
// );
//log_info!("{}", commit);
// TODO: check that DAG is well formed. check the heads
let res = match commit.body().ok_or(VerifierError::CommitBodyNotFound)? {
CommitBody::V0(v0) => match v0 {
CommitBodyV0::Repository(a) => a.verify(&commit, self, branch_id, repo_id, store),
CommitBodyV0::RootBranch(a) => a.verify(&commit, self, branch_id, repo_id, store),
CommitBodyV0::Branch(a) => a.verify(&commit, self, branch_id, repo_id, store),
CommitBodyV0::SyncSignature(a) => {
a.verify(&commit, self, branch_id, repo_id, store)
}
CommitBodyV0::AddBranch(a) => a.verify(&commit, self, branch_id, repo_id, store),
CommitBodyV0::StoreUpdate(a) => a.verify(&commit, self, branch_id, repo_id, store),
CommitBodyV0::AddSignerCap(a) => a.verify(&commit, self, branch_id, repo_id, store),
CommitBodyV0::Repository(a) => a.verify(commit, self, branch_id, repo_id, store),
CommitBodyV0::RootBranch(a) => a.verify(commit, self, branch_id, repo_id, store),
CommitBodyV0::Branch(a) => a.verify(commit, self, branch_id, repo_id, store),
CommitBodyV0::SyncSignature(a) => a.verify(commit, self, branch_id, repo_id, store),
CommitBodyV0::AddBranch(a) => a.verify(commit, self, branch_id, repo_id, store),
CommitBodyV0::StoreUpdate(a) => a.verify(commit, self, branch_id, repo_id, store),
CommitBodyV0::AddSignerCap(a) => a.verify(commit, self, branch_id, repo_id, store),
CommitBodyV0::AddFile(a) => a.verify(commit, self, branch_id, repo_id, store),
_ => {
log_err!("unimplemented verifier {}", commit);
Err(VerifierError::NotImplemented)
return Err(VerifierError::NotImplemented);
}
},
};
let res = res.await;
if res.is_ok() {
let commit_ref = commit.reference().unwrap();
if let Some(repo) = self.repos.get_mut(repo_id) {
@ -777,7 +1104,8 @@ impl Verifier {
store_repo: &StoreRepo,
) -> Result<&Repo, VerifierError> {
//let store = self.get_store(store_repo);
let repo_ref = self.repos.get(id).ok_or(VerifierError::RepoNotFound);
let repo_ref: Result<&Repo, VerifierError> =
self.repos.get(id).ok_or(VerifierError::RepoNotFound);
repo_ref
}
@ -794,6 +1122,67 @@ impl Verifier {
Ok(())
}
async fn do_sync_req_if_needed<'a>(
&mut self,
broker: &RwLockReadGuard<'a, Broker<'a>>,
user: &UserId,
remote: &DirectPeerId,
branch_id: &BranchId,
repo_id: &RepoId,
remote_heads: &Vec<ObjectId>,
) -> Result<(), NgError> {
let (store, msg, branch_secret) = {
let repo = self.repos.get(repo_id).unwrap();
let branch_info = repo.branch(branch_id)?;
let store = Arc::clone(&repo.store);
let ours = branch_info.current_heads.iter().map(|refe| refe.id);
let ours_set: HashSet<Digest> = HashSet::from_iter(ours.clone());
let theirs = HashSet::from_iter(remote_heads.clone().into_iter());
if theirs.len() == 0 {
log_info!("branch is new on the broker. doing nothing");
return Ok(());
}
if ours_set.difference(&theirs).count() == 0
&& theirs.difference(&ours_set).count() == 0
{
// no need to sync
log_info!("branch is up to date");
return Ok(());
}
let msg = TopicSyncReq::V0(TopicSyncReqV0 {
topic: branch_info.topic,
known_heads: ours.collect(),
target_heads: remote_heads.clone(),
overlay: Some(store.overlay_for_read_on_client_protocol()),
});
(store, msg, branch_info.read_cap.key.clone())
};
match broker
.request::<TopicSyncReq, TopicSyncRes>(user, remote, msg)
.await
{
Err(e) => return Err(e),
Ok(SoS::Stream(mut events)) => {
while let Some(event) = events.next().await {
let commit = event
.event()
.open(&store, repo_id, branch_id, &branch_secret)?;
self.verify_commit(&commit, branch_id, repo_id, Arc::clone(&store))
.await?;
}
}
Ok(_) => return Err(NgError::InvalidResponse),
}
Ok(())
}
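
    // A small sketch (not in this commit) of the up-to-date test used above:
    // the two difference() counts amount to set equality of the local and
    // remote heads, so ordering and duplicates are irrelevant.
    fn heads_are_equal(ours: &[Digest], theirs: &[Digest]) -> bool {
        let ours: HashSet<&Digest> = ours.iter().collect();
        let theirs: HashSet<&Digest> = theirs.iter().collect();
        ours == theirs
    }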
async fn do_sync_req<'a>(
&mut self,
broker: &RwLockReadGuard<'a, Broker<'a>>,
@ -817,7 +1206,8 @@ impl Verifier {
.event()
.open(&store, repo_id, branch_id, branch_secret)?;
self.verify_commit(commit, branch_id, repo_id, Arc::clone(&store))?;
self.verify_commit(&commit, branch_id, repo_id, Arc::clone(&store))
.await?;
}
}
Ok(_) => return Err(NgError::InvalidResponse),
@ -925,7 +1315,7 @@ impl Verifier {
});
match broker.request::<CommitGet, Block>(user, remote, msg).await {
Err(NgError::ServerError(ServerError::NotFound)) => {
// TODO: fallback to BlockGet, then Commit::load(with_body:true), which will return an Err(CommitLoadError::MissingBlocks), then do another BlockGet with those, and then again Commit::load...
// TODO: fallback to BlocksGet, then Commit::load(with_body:true), which will return an Err(CommitLoadError::MissingBlocks), then do another BlocksGet with those, and then again Commit::load...
return Err(NgError::SiteNotFoundOnBroker);
}
Ok(SoS::Stream(blockstream)) => {
@ -945,8 +1335,44 @@ impl Verifier {
}
}
pub(crate) async fn fetch_blocks_if_needed(
&self,
id: &BlockId,
repo_id: &RepoId,
store_repo: &StoreRepo,
) -> Result<Option<Receiver<Block>>, NgError> {
let repo = self.get_repo(repo_id, store_repo)?;
let overlay = repo.store.overlay_for_read_on_client_protocol();
let broker = BROKER.read().await;
let user = self.config.user_priv_key.to_pub();
let remote = self.connected_server_id.to_owned().unwrap();
match repo.store.has(id) {
Err(StorageError::NotFound) => {
let msg = BlocksGet::V0(BlocksGetV0 {
ids: vec![*id],
topic: None,
include_children: true,
overlay: Some(overlay),
});
match broker
.request::<BlocksGet, Block>(&user, &remote, msg)
.await
{
Ok(SoS::Stream(blockstream)) => Ok(Some(blockstream)),
Ok(_) => return Err(NgError::InvalidResponse),
Err(e) => return Err(e),
}
}
Err(e) => Err(e.into()),
Ok(()) => Ok(None),
}
}
async fn bootstrap_from_remote(&mut self) -> Result<(), NgError> {
if self.is_in_memory() || self.need_bootstrap() {
if self.need_bootstrap() {
let broker = BROKER.read().await;
let user = self.config.user_priv_key.to_pub();
let remote = self.connected_server_id.to_owned().unwrap();
@ -994,7 +1420,7 @@ impl Verifier {
Ok(Arc::clone(store))
}
fn load_from_credentials_and_outbox(
async fn load_from_credentials_and_outbox(
&mut self,
events: &Vec<EventOutboxStorage>,
) -> Result<(), VerifierError> {
@ -1077,11 +1503,12 @@ impl Verifier {
postponed_signer_caps.push(commit);
} else {
self.verify_commit(
commit,
&commit,
&branch_id.clone(),
private_store.id(),
Arc::clone(&private_store),
)?;
)
.await?;
}
}
}
@ -1150,7 +1577,8 @@ impl Verifier {
let commit = e.event.open(store, store.id(), branch_id, branch_secret)?;
self.verify_commit(commit, &branch_id.clone(), store.id(), Arc::clone(store))?;
self.verify_commit(&commit, &branch_id.clone(), store.id(), Arc::clone(store))
.await?;
} else {
// log_info!(
// "SKIPPED wrong overlay {} {}",
@ -1176,11 +1604,12 @@ impl Verifier {
// finally, ingest the signer_caps.
for signer_cap in postponed_signer_caps {
self.verify_commit(
signer_cap,
&signer_cap,
private_user_branch.as_ref().unwrap(),
private_store.id(),
Arc::clone(&private_store),
)?;
)
.await?;
}
Ok(())
@ -1198,7 +1627,11 @@ impl Verifier {
}
pub async fn send_outbox(&mut self) -> Result<(), NgError> {
let events: Vec<EventOutboxStorage> = self.take_events_from_outbox().unwrap_or(vec![]);
let ret = self.take_events_from_outbox();
// if ret.is_err() {
// log_err!("send_outbox {:}", ret.as_ref().unwrap_err());
// }
let events: Vec<EventOutboxStorage> = ret.unwrap_or(vec![]);
if events.len() == 0 {
return Ok(());
}
@ -1213,34 +1646,34 @@ impl Verifier {
// for all the events, check that they are valid (topic exists, current_heads match with event)
let mut need_replay = false;
let mut events_to_replay = Vec::with_capacity(events.len());
let mut branch_heads: HashMap<BranchId, Vec<ObjectRef>> = HashMap::new();
//let mut branch_heads: HashMap<BranchId, Vec<ObjectRef>> = HashMap::new();
for e in events {
match self.topics.get(&(e.overlay, *e.event.topic_id())) {
Some((repo_id, branch_id)) => match self.repos.get(repo_id) {
Some(repo) => match repo.branches.get(branch_id) {
Some(branch) => {
let commit = e.event.open_with_info(repo, branch)?;
let acks = commit.acks();
match branch_heads.get(branch_id) {
Some(previous_heads) => {
if *previous_heads != acks {
// skip event, as it is outdated.
continue;
} else {
branch_heads
.insert(*branch_id, vec![commit.reference().unwrap()]);
}
}
None => {
if acks != branch.current_heads {
// skip event, as it is outdated.
continue;
} else {
branch_heads
.insert(*branch_id, vec![commit.reference().unwrap()]);
}
}
}
// let commit = e.event.open_with_info(repo, branch)?;
// let acks = commit.acks();
// match branch_heads.get(branch_id) {
// Some(previous_heads) => {
// if *previous_heads != acks {
// // skip event, as it is outdated.
// continue;
// } else {
// branch_heads
// .insert(*branch_id, vec![commit.reference().unwrap()]);
// }
// }
// None => {
// if acks != branch.current_heads {
// // skip event, as it is outdated.
// continue;
// } else {
// branch_heads
// .insert(*branch_id, vec![commit.reference().unwrap()]);
// }
// }
// }
}
None => {
log_info!("REPLAY BRANCH NOT FOUND {}", branch_id);
@ -1265,7 +1698,8 @@ impl Verifier {
}
log_info!("NEED REPLAY {need_replay}");
if need_replay {
self.load_from_credentials_and_outbox(&events_to_replay)?;
self.load_from_credentials_and_outbox(&events_to_replay)
.await?;
log_info!("REPLAY DONE");
}
log_info!("SENDING {} EVENTS FOR OUTBOX", events_to_replay.len());
@ -1338,7 +1772,7 @@ impl Verifier {
let (graph, user, block) = match &config.config_type {
VerifierConfigType::Memory | VerifierConfigType::JsSaveSession(_) => (
Some(oxigraph::store::Store::new().unwrap()),
None, //Some(Box::new(InMemoryUserStorage::new()) as Box<dyn UserStorage>),
Some(Box::new(InMemoryUserStorage::new()) as Box<dyn UserStorage>),
Some(block_storage),
),
#[cfg(not(target_family = "wasm"))]
@ -1382,6 +1816,9 @@ impl Verifier {
repos: HashMap::new(),
topics: HashMap::new(),
in_memory_outbox: vec![],
inner_to_outer: HashMap::new(),
uploads: BTreeMap::new(),
branch_subscriptions: HashMap::new(),
};
// this is important as it will load the last seq from storage
if verif.config.config_type.should_load_last_seq_num() {
@ -1392,12 +1829,19 @@ impl Verifier {
Ok(verif)
}
    pub fn doc_fetch(
        &mut self,
        nuri: String,
        payload: Option<AppRequestPayload>,
    ) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
        unimplemented!();
    }

    pub async fn app_request_stream(
        &mut self,
        req: AppRequest,
    ) -> Result<(Receiver<AppResponse>, CancelFn), NgError> {
        match req {
            AppRequest::V0(v0) => v0.command.process_stream(self, &v0.nuri, &v0.payload).await,
        }
    }
pub async fn app_request(&mut self, req: AppRequest) -> Result<AppResponse, NgError> {
match req {
AppRequest::V0(v0) => v0.command.process(self, v0.nuri, v0.payload).await,
}
}
pub async fn respond(
@ -1433,10 +1877,19 @@ impl Verifier {
sub: &TopicSubRes,
) -> Result<(), NgError> {
let overlay = repo.store.inner_overlay();
//log_info!("branch_was_opened searching for topic {}", sub.topic_id());
// log_info!(
// "branch_was_opened topic {} overlay {}",
// sub.topic_id(),
// overlay
// );
let (_, branch_id) = topics
.get(&(overlay, *sub.topic_id()))
.ok_or(NgError::TopicNotFound)?;
// log_info!(
// "branch_was_opened insert branch_id {} is_publisher {}",
// branch_id,
// sub.is_publisher()
// );
repo.opened_branches.insert(*branch_id, sub.is_publisher());
Ok(())
}
@ -1447,6 +1900,11 @@ impl Verifier {
opened_repo: &RepoOpened,
) -> Result<(), NgError> {
let repo = self.repos.get_mut(repo_id).ok_or(NgError::RepoNotFound)?;
//TODO: improve the inner_to_outer insert. (should be done when store is created, not here. should work also for dialogs.)
self.inner_to_outer.insert(
repo.store.overlay_for_read_on_client_protocol(),
repo.store.outer_overlay(),
);
for sub in opened_repo {
Self::branch_was_opened(&self.topics, repo, sub)?;
}

@ -70,7 +70,7 @@ impl Wallet {
}
pub fn name(&self) -> String {
match self {
Wallet::V0(v0) => base64_url::encode(&v0.id.slice()),
Wallet::V0(v0) => v0.id.to_string(),
_ => unimplemented!(),
}
}
@ -109,11 +109,11 @@ impl Wallet {
let sig = sign(&wallet_privkey, &wallet_id, &ser_wallet).unwrap();
let wallet_v0 = WalletV0 {
/// ID
// ID
id: wallet_id,
/// Content
// Content
content: wallet_content,
/// Signature over content by wallet's private key
// Signature over content by wallet's private key
sig,
};
@ -552,7 +552,7 @@ pub fn create_wallet_first_step_v0(
let intermediary = CreateWalletIntermediaryV0 {
wallet_privkey,
wallet_name: base64_url::encode(&wallet_id.slice()),
wallet_name: wallet_id.to_string(),
client,
user_privkey,
in_memory: !params.local_save,
@ -742,6 +742,7 @@ pub async fn create_wallet_second_step_v0(
peer_id: PubKey::nil(),
nonce: 0,
encrypted,
test: None,
};
let ser_wallet = serde_bare::to_vec(&wallet_content).unwrap();
@ -749,11 +750,11 @@ pub async fn create_wallet_second_step_v0(
let sig = sign(&params.wallet_privkey, &wallet_id, &ser_wallet).unwrap();
let wallet_v0 = WalletV0 {
/// ID
// ID
id: wallet_id,
/// Content
// Content
content: wallet_content,
/// Signature over content by wallet's private key
// Signature over content by wallet's private key
sig,
};
@ -865,10 +866,7 @@ mod test {
let ser_wallet = to_vec(&NgFile::V0(NgFileV0::Wallet(res.wallet.clone()))).unwrap();
file.write_all(&ser_wallet);
log_debug!(
"wallet id: {:?}",
base64_url::encode(&res.wallet.id().slice())
);
log_debug!("wallet id: {}", res.wallet.id());
log_debug!("pazzle {:?}", display_pazzle(&res.pazzle));
log_debug!("mnemonic {:?}", display_mnemonic(&res.mnemonic));
log_debug!("pin {:?}", pin);

@ -498,13 +498,23 @@ impl SensitiveWallet {
pub fn individual_site(
&self,
user_id: &UserId,
) -> Option<(PrivKey, Option<ReadCap>, Option<RepoId>)> {
) -> Option<(
PrivKey,
Option<ReadCap>,
Option<RepoId>,
Option<RepoId>,
Option<RepoId>,
)> {
match self {
Self::V0(v0) => match v0.sites.get(&user_id.to_string()) {
Some(site) => match &site.site_type {
SiteType::Individual((user, readcap)) => {
Some((user.clone(), Some(readcap.clone()), Some(site.private.id)))
}
SiteType::Individual((user, readcap)) => Some((
user.clone(),
Some(readcap.clone()),
Some(site.private.id),
Some(site.protected.id),
Some(site.public.id),
)),
_ => None,
},
None => None,
@ -639,6 +649,8 @@ pub struct WalletContentV0 {
// WalletLog content encrypted with XChaCha20Poly1305, AD = timestamp and walletID
#[serde(with = "serde_bytes")]
pub encrypted: Vec<u8>,
pub test: Option<String>,
}
/// Wallet Log V0

@ -36,7 +36,7 @@ use ng_repo::errors::*;
use ng_repo::log::*;
use ng_repo::types::*;
use ng_repo::utils::{
decode_key, display_timestamp, generate_keypair, now_timestamp, timestamp_after,
decode_priv_key, display_timestamp, generate_keypair, now_timestamp, timestamp_after,
};
use clap::{arg, command, value_parser, ArgAction, Command};
@ -279,10 +279,10 @@ async fn main_inner() -> Result<(), NgcliError> {
.lines()
.nth(0)
.ok_or(NgcliError::InvalidKeyFile("empty file".to_string()))?;
let res = decode_key(first_line.trim())
let res = decode_priv_key(first_line.trim())
.map_err(|_| NgcliError::InvalidKeyFile("deserialization error".to_string()))?;
file.zeroize();
Some(res)
Some(*res.slice())
}
};
@ -293,20 +293,20 @@ async fn main_inner() -> Result<(), NgcliError> {
//key_string.as_mut().zeroize();
gen_client_keys(key_from_file)
} else {
let res = decode_key(key_string.as_str()).map_err(|_| {
let res = decode_priv_key(key_string.as_str()).map_err(|_| {
NgcliError::InvalidKeyFile(
"check the argument provided in command line".to_string(),
)
})?;
if matches.get_flag("save_key") {
let mut master_key = base64_url::encode(&res);
let mut master_key = res.to_string();
write(key_path.clone(), &master_key)
.map_err(|e| NgcliError::CannotSaveKey(e.to_string()))?;
master_key.zeroize();
log_info!("The key has been saved to {}", key_path.to_str().unwrap());
}
//key_string.as_mut().zeroize();
gen_client_keys(Some(res))
gen_client_keys(Some(*res.slice()))
}
}
None => {
@ -314,7 +314,8 @@ async fn main_inner() -> Result<(), NgcliError> {
gen_client_keys(key_from_file)
} else {
let res = gen_client_keys(None);
let mut master_key = base64_url::encode(&res[0]);
let key = PrivKey::Ed25519PrivKey(res[0]);
let mut master_key = key.to_string();
if matches.get_flag("save_key") {
write(key_path.clone(), &master_key)
.map_err(|e| NgcliError::CannotSaveKey(e.to_string()))?;

@ -35,7 +35,7 @@ use ng_repo::types::SymKey;
use ng_repo::utils::ed_keypair_from_priv_bytes;
use ng_repo::{
types::{PrivKey, PubKey},
utils::{decode_key, generate_keypair, sign, verify},
utils::{decode_key, decode_priv_key, generate_keypair, sign, verify},
};
use serde_json::{from_str, to_string_pretty};
use std::error::Error;
@ -286,12 +286,12 @@ fn prepare_accept_forward_for_domain(
args: &mut Cli,
) -> Result<AcceptForwardForV0, NgError> {
if args.domain_peer.is_some() {
let key = decode_key(args.domain_peer.as_ref().unwrap().as_str())?;
let key = decode_priv_key(args.domain_peer.as_ref().unwrap().as_str())?;
args.domain_peer.as_mut().unwrap().zeroize();
Ok(AcceptForwardForV0::PublicDomainPeer((
domain,
PrivKey::Ed25519PrivKey(key),
key,
"".to_string(),
)))
} else {
@ -425,10 +425,10 @@ async fn main_inner() -> Result<(), NgdError> {
.lines()
.nth(0)
.ok_or(NgdError::InvalidKeyFile("empty file".to_string()))?;
let res = decode_key(first_line.trim())
let res = decode_priv_key(first_line.trim())
.map_err(|_| NgdError::InvalidKeyFile("deserialization error".to_string()))?;
file.zeroize();
Some(res)
Some(*res.slice())
}
};
@ -439,20 +439,19 @@ async fn main_inner() -> Result<(), NgdError> {
args.key.as_mut().unwrap().zeroize();
gen_broker_keys(key_from_file)
} else {
let res = decode_key(key_string.as_str()).map_err(|_| {
let res = decode_priv_key(key_string.as_str()).map_err(|_| {
NgdError::InvalidKeyFile(
"check the argument provided in command line".to_string(),
)
})?;
if args.save_key {
let mut master_key = base64_url::encode(&res);
write(key_path.clone(), &master_key)
write(key_path.clone(), res.to_string())
.map_err(|e| NgdError::CannotSaveKey(e.to_string()))?;
master_key.zeroize();
//master_key.zeroize();
log_info!("The key has been saved to {}", key_path.to_str().unwrap());
}
args.key.as_mut().unwrap().zeroize();
gen_broker_keys(Some(res))
gen_broker_keys(Some(*res.slice()))
}
}
None => {
@ -460,7 +459,8 @@ async fn main_inner() -> Result<(), NgdError> {
gen_broker_keys(key_from_file)
} else {
let res = gen_broker_keys(None);
let mut master_key = base64_url::encode(&res[0]);
let key = PrivKey::Ed25519PrivKey(res[0]);
let mut master_key = key.to_string();
if args.save_key {
write(key_path.clone(), &master_key)
.map_err(|e| NgdError::CannotSaveKey(e.to_string()))?;
@ -912,7 +912,7 @@ async fn main_inner() -> Result<(), NgdError> {
"The PEER_ID provided in the --forward option is invalid",
)
})?;
let peer_id = PubKey::Ed25519PubKey(pub_key_array);
let peer_id = pub_key_array;
let server_type = if parts[0].len() > 0 {
let first_char = parts[0].chars().next().unwrap();

@ -88,8 +88,7 @@ impl Server {
fn get_wallet(&self, encoded_id: String) -> Result<Response, NgHttpError> {
log_debug!("DOWNLOAD wallet {}", encoded_id);
let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?;
let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?;
let wallet_id = PubKey::Ed25519PubKey(*array);
let wallet_id: PubKey = from_slice(&id).map_err(|e| NgHttpError::InvalidParams)?;
let wallet_record =
WalletRecord::open(&wallet_id, &self.store).map_err(|e| NgHttpError::NotFound)?;
let wallet = wallet_record.wallet().map_err(|e| NgHttpError::NotFound)?;
@ -108,8 +107,7 @@ impl Server {
log_debug!("DOWNLOAD bootstrap {}", encoded_id);
let id = base64_url::decode(&encoded_id).map_err(|e| NgHttpError::InvalidParams)?;
let array = slice_as_array!(&id, [u8; 32]).ok_or(NgHttpError::InvalidParams)?;
let wallet_id = PubKey::Ed25519PubKey(*array);
let wallet_id: PubKey = from_slice(&id).map_err(|e| NgHttpError::InvalidParams)?;
let wallet_record =
WalletRecord::open(&wallet_id, &self.store).map_err(|e| NgHttpError::NotFound)?;
let bootstrap = wallet_record
