parent 5fed085379
commit 32fffcb947
@@ -0,0 +1,82 @@
/*
 * Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
 * All rights reserved.
 * Licensed under the Apache License, Version 2.0
 * <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
 * or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
 * at your option. All files in the project carrying such
 * notice may not be copied, modified, or distributed except
 * according to those terms.
 */
use crate::broker::{ServerConfig, BROKER};
use crate::connection::NoiseFSM;
use crate::types::*;
use crate::{actor::*, types::ProtocolMessage};
use async_std::sync::Mutex;
use ng_repo::errors::*;
use ng_repo::log::*;
use ng_repo::repo::{BranchInfo, Repo};
use ng_repo::store::Store;
use ng_repo::types::*;
use serde::{Deserialize, Serialize};
use std::sync::Arc;

impl PublishEvent {
    pub fn get_actor(&self, id: i64) -> Box<dyn EActor> {
        Actor::<PublishEvent, ()>::new_responder(id)
    }

    pub fn new(event: Event, overlay: OverlayId) -> PublishEvent {
        PublishEvent(event, Some(overlay))
    }
    pub fn set_overlay(&mut self, overlay: OverlayId) {
        self.1 = Some(overlay);
    }

    // pub fn overlay(&self) -> &OverlayId {
    //     self.1.as_ref().unwrap()
    // }
}

impl TryFrom<ProtocolMessage> for PublishEvent {
    type Error = ProtocolError;
    fn try_from(msg: ProtocolMessage) -> Result<Self, Self::Error> {
        let req: ClientRequestContentV0 = msg.try_into()?;
        if let ClientRequestContentV0::PublishEvent(a) = req {
            Ok(a)
        } else {
            log_debug!("INVALID {:?}", req);
            Err(ProtocolError::InvalidValue)
        }
    }
}

impl From<PublishEvent> for ProtocolMessage {
    fn from(msg: PublishEvent) -> ProtocolMessage {
        let overlay = msg.1.unwrap();
        ProtocolMessage::from_client_request_v0(ClientRequestContentV0::PublishEvent(msg), overlay)
    }
}

impl Actor<'_, PublishEvent, ()> {}

#[async_trait::async_trait]
impl EActor for Actor<'_, PublishEvent, ()> {
    async fn respond(
        &mut self,
        msg: ProtocolMessage,
        fsm: Arc<Mutex<NoiseFSM>>,
    ) -> Result<(), ProtocolError> {
        let req = PublishEvent::try_from(msg)?;

        //TODO implement all the server side logic

        let res: Result<(), ServerError> = Ok(());

        fsm.lock()
            .await
            .send_in_reply_to(res.into(), self.id())
            .await?;
        Ok(())
    }
}
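For orientation, here is a minimal sketch (not part of the commit) of how the conversions above compose. It assumes the surrounding ng-net actor and protocol types shown in this hunk; publish_round_trip is a hypothetical helper, not an existing function:

// Sketch only: wraps an Event for an overlay, converts it to the wire type, and decodes it back.
fn publish_round_trip(event: Event, overlay: OverlayId) -> Result<PublishEvent, ProtocolError> {
    let publish = PublishEvent::new(event, overlay); // PublishEvent(event, Some(overlay))
    let msg: ProtocolMessage = publish.into(); // From<PublishEvent> uses the overlay for routing
    PublishEvent::try_from(msg) // any other request kind yields ProtocolError::InvalidValue
}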
@@ -1,343 +0,0 @@
/*
 * Copyright (c) 2022-2024 Niko Bonnieure, Par le Peuple, NextGraph.org developers
 * All rights reserved.
 * Licensed under the Apache License, Version 2.0
 * <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
 * or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
 * at your option. All files in the project carrying such
 * notice may not be copied, modified, or distributed except
 * according to those terms.
 */

//! Connection to a Broker, can be local or remote.
//! If remote, it will use a Stream and Sink of framed messages
//! This is the trait
//!

use futures::channel::mpsc;
use futures::{
    ready, select,
    stream::Stream,
    task::{Context, Poll},
    Future, FutureExt,
};
use std::pin::Pin;
use std::{collections::HashSet, fmt::Debug};

use crate::errors::*;
use crate::types::*;
use async_broadcast::{broadcast, Receiver};
use futures::{pin_mut, stream, Sink, SinkExt, StreamExt};
use ng_repo::log::*;
use ng_repo::object::*;
use ng_repo::store::*;
use ng_repo::types::*;
use ng_repo::utils::*;

#[async_trait::async_trait]
pub trait BrokerConnection {
    type OC: BrokerConnection;
    type BlockStream: Stream<Item = Block>;

    async fn close(&mut self);

    async fn add_user(
        &mut self,
        user_id: PubKey,
        admin_user_pk: PrivKey,
    ) -> Result<(), ProtocolError>;

    async fn del_user(&mut self, user_id: PubKey, admin_user_pk: PrivKey);

    async fn add_client(&mut self, client_id: ClientId, user_pk: PrivKey);

    async fn del_client(&mut self, client_id: ClientId, user_pk: PrivKey);

    async fn overlay_connect(
        &mut self,
        repo: &RepoLink,
        public: bool,
    ) -> Result<OverlayConnectionClient<Self::OC>, ProtocolError>;

    // TODO: remove those 4 functions from trait. they are used internally only. should not be exposed to end-user
    async fn process_overlay_request(
        &mut self,
        overlay: OverlayId,
        request: BrokerOverlayRequestContentV0,
    ) -> Result<(), ProtocolError>;

    async fn process_overlay_request_stream_response(
        &mut self,
        overlay: OverlayId,
        request: BrokerOverlayRequestContentV0,
    ) -> Result<Pin<Box<Self::BlockStream>>, ProtocolError>;

    async fn process_overlay_request_objectid_response(
        &mut self,
        overlay: OverlayId,
        request: BrokerOverlayRequestContentV0,
    ) -> Result<ObjectId, ProtocolError>;

    async fn process_overlay_connect(
        &mut self,
        repo_link: &RepoLink,
        public: bool,
    ) -> Result<OverlayId, ProtocolError> {
        let overlay: OverlayId = match public {
            true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
            false => {
                let key: [u8; blake3::OUT_LEN] = blake3::derive_key(
                    "NextGraph OverlayId BLAKE3 key",
                    repo_link.secret().slice(),
                );
                let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
                Digest::Blake3Digest32(*keyed_hash.as_bytes())
            }
        };

        let res = self
            .process_overlay_request(
                overlay,
                BrokerOverlayRequestContentV0::OverlayConnect(OverlayConnect::V0()),
            )
            .await;

        match res {
            Err(e) => {
                if e == ProtocolError::OverlayNotJoined {
                    log_debug!("OverlayNotJoined");
                    let res2 = self
                        .process_overlay_request(
                            overlay,
                            BrokerOverlayRequestContentV0::OverlayJoin(OverlayJoin::V0(
                                OverlayJoinV0 {
                                    secret: repo_link.secret(),
                                    peers: repo_link.peers(),
                                    repo_pubkey: Some(repo_link.id()), //TODO if we know we are connecting to a core node, we can pass None here
                                },
                            )),
                        )
                        .await?;
                } else {
                    return Err(e);
                }
            }
            Ok(()) => {}
        }

        log_debug!("OverlayConnectionClient ready");
        Ok(overlay)
    }
}

pub struct OverlayConnectionClient<'a, T>
where
    T: BrokerConnection,
{
    broker: &'a mut T,
    overlay: OverlayId,
    repo_link: RepoLink,
}

impl<'a, T> OverlayConnectionClient<'a, T>
where
    T: BrokerConnection,
{
    pub fn create(
        broker: &'a mut T,
        overlay: OverlayId,
        repo_link: RepoLink,
    ) -> OverlayConnectionClient<'a, T> {
        OverlayConnectionClient {
            broker,
            repo_link,
            overlay,
        }
    }

    pub fn overlay(repo_link: &RepoLink, public: bool) -> OverlayId {
        let overlay: OverlayId = match public {
            true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
            false => {
                let key: [u8; blake3::OUT_LEN] = blake3::derive_key(
                    "NextGraph OverlayId BLAKE3 key",
                    repo_link.secret().slice(),
                );
                let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
                Digest::Blake3Digest32(*keyed_hash.as_bytes())
            }
        };
        overlay
    }

    pub async fn sync_branch(
        &mut self,
        heads: Vec<ObjectId>,
        known_heads: Vec<ObjectId>,
        known_commits: BloomFilter,
    ) -> Result<Pin<Box<T::BlockStream>>, ProtocolError> {
        self.broker
            .process_overlay_request_stream_response(
                self.overlay,
                BrokerOverlayRequestContentV0::BranchSyncReq(BranchSyncReq::V0(BranchSyncReqV0 {
                    heads,
                    known_heads,
                    known_commits,
                })),
            )
            .await
    }

    pub fn leave(&self) {}

    pub fn topic_connect(&self, id: TopicId) -> TopicSubscription<T> {
        let (s, mut r1) = broadcast(128); // FIXME this should be done only once, in the Broker
        TopicSubscription {
            id,
            overlay_cnx: self,
            event_stream: r1.clone(),
        }
    }

    pub async fn delete_object(&mut self, id: ObjectId) -> Result<(), ProtocolError> {
        self.broker
            .process_overlay_request(
                self.overlay,
                BrokerOverlayRequestContentV0::ObjectDel(ObjectDel::V0(ObjectDelV0 { id })),
            )
            .await
    }

    pub async fn pin_object(&mut self, id: ObjectId) -> Result<(), ProtocolError> {
        self.broker
            .process_overlay_request(
                self.overlay,
                BrokerOverlayRequestContentV0::ObjectPin(ObjectPin::V0(ObjectPinV0 { id })),
            )
            .await
    }

    pub async fn unpin_object(&mut self, id: ObjectId) -> Result<(), ProtocolError> {
        self.broker
            .process_overlay_request(
                self.overlay,
                BrokerOverlayRequestContentV0::ObjectUnpin(ObjectUnpin::V0(ObjectUnpinV0 { id })),
            )
            .await
    }

    pub async fn copy_object(
        &mut self,
        id: ObjectId,
        expiry: Option<Timestamp>,
    ) -> Result<ObjectId, ProtocolError> {
        self.broker
            .process_overlay_request_objectid_response(
                self.overlay,
                BrokerOverlayRequestContentV0::ObjectCopy(ObjectCopy::V0(ObjectCopyV0 {
                    id,
                    expiry,
                })),
            )
            .await
    }

    pub async fn get_block(
        &mut self,
        id: BlockId,
        include_children: bool,
        topic: Option<PubKey>,
    ) -> Result<Pin<Box<T::BlockStream>>, ProtocolError> {
        self.broker
            .process_overlay_request_stream_response(
                self.overlay,
                BrokerOverlayRequestContentV0::BlockGet(BlockGet::V0(BlockGetV0 {
                    id,
                    include_children,
                    topic,
                })),
            )
            .await
    }

    pub async fn get_object(
        &mut self,
        id: ObjectId,
        topic: Option<PubKey>,
    ) -> Result<Object, ProtocolError> {
        let mut blockstream = self.get_block(id, true, topic).await?;
        let mut store = HashMapBlockStorage::new();
        while let Some(block) = blockstream.next().await {
            store.put(&block).unwrap();
        }
        Object::load(id, None, &store).map_err(|e| match e {
            ObjectParseError::MissingBlocks(_missing) => ProtocolError::MissingBlocks,
            _ => ProtocolError::ObjectParseError,
        })
    }

    pub async fn put_block(&mut self, block: &Block) -> Result<BlockId, ProtocolError> {
        self.broker
            .process_overlay_request(
                self.overlay,
                BrokerOverlayRequestContentV0::BlockPut(BlockPut::V0(block.clone())),
            )
            .await?;
        Ok(block.id())
    }

    // TODO maybe implement a put_block_with_children ? that would behave like put_object, but taking in a parent Block instead of a content

    pub async fn put_object(
        &mut self,
        content: ObjectContent,
        deps: Vec<ObjectId>,
        expiry: Option<Timestamp>,
        max_object_size: usize,
        repo_pubkey: PubKey,
        repo_secret: SymKey,
    ) -> Result<ObjectId, ProtocolError> {
        let obj = Object::new(
            content,
            deps,
            expiry,
            max_object_size,
            repo_pubkey,
            repo_secret,
        );
        log_debug!("object has {} blocks", obj.blocks().len());
        let mut deduplicated: HashSet<ObjectId> = HashSet::new();
        for block in obj.blocks() {
            let id = block.id();
            if deduplicated.get(&id).is_none() {
                let _ = self.put_block(block).await?;
                deduplicated.insert(id);
            }
        }
        Ok(obj.id())
    }
}

pub struct TopicSubscription<'a, T>
where
    T: BrokerConnection,
{
    id: TopicId,
    overlay_cnx: &'a OverlayConnectionClient<'a, T>,
    event_stream: Receiver<Event>,
}

impl<'a, T> TopicSubscription<'a, T>
where
    T: BrokerConnection,
{
    pub fn unsubscribe(&self) {}

    pub fn disconnect(&self) {}

    pub fn get_branch_heads(&self) {}

    pub fn get_event_stream(&self) -> &Receiver<Event> {
        &self.event_stream
    }
}
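The overlay id derivation that appears twice in the removed file (in process_overlay_connect and OverlayConnectionClient::overlay) is a plain BLAKE3 hash of the repo id for public overlays, and a keyed BLAKE3 hash for private ones. Below is a standalone sketch (not part of the commit) against the blake3 crate, with repo_id and repo_secret as hypothetical 32-byte stand-ins for the RepoLink accessors:

// Sketch only: mirrors the derivation shown above, outside the crate's own types.
fn derive_overlay_id(repo_id: &[u8; 32], repo_secret: &[u8; 32], public: bool) -> [u8; 32] {
    if public {
        // public overlay: unkeyed BLAKE3 hash of the repo public key
        *blake3::hash(repo_id).as_bytes()
    } else {
        // private overlay: derive a key from the repo secret, then keyed-hash the repo id
        let key: [u8; blake3::OUT_LEN] =
            blake3::derive_key("NextGraph OverlayId BLAKE3 key", repo_secret);
        *blake3::keyed_hash(&key, repo_id).as_bytes()
    }
}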
@@ -1,587 +0,0 @@
fn block_size() -> usize {
    store_max_value_size()
    //store_valid_value_size(0)
}

async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpriv_key: PrivKey) {
    fn add_obj(
        content: ObjectContent,
        deps: Vec<ObjectId>,
        expiry: Option<Timestamp>,
        repo_pubkey: PubKey,
        repo_secret: SymKey,
        store: &mut impl BlockStorage,
    ) -> ObjectRef {
        let max_object_size = 4000;
        let obj = Object::new(
            content,
            deps,
            expiry,
            max_object_size,
            repo_pubkey,
            repo_secret,
        );
        //log_debug!(">>> add_obj");
        log_debug!(" id: {}", obj.id());
        //log_debug!(" deps: {:?}", obj.deps());
        obj.save(store).unwrap();
        obj.reference().unwrap()
    }

    fn add_commit(
        branch: ObjectRef,
        author_privkey: PrivKey,
        author_pubkey: PubKey,
        seq: u32,
        deps: Vec<ObjectRef>,
        acks: Vec<ObjectRef>,
        body_ref: ObjectRef,
        repo_pubkey: PubKey,
        repo_secret: SymKey,
        store: &mut impl BlockStorage,
    ) -> ObjectRef {
        let mut obj_deps: Vec<ObjectId> = vec![];
        obj_deps.extend(deps.iter().map(|r| r.id));
        obj_deps.extend(acks.iter().map(|r| r.id));

        let obj_ref = ObjectRef {
            id: ObjectId::Blake3Digest32([1; 32]),
            key: SymKey::ChaCha20Key([2; 32]),
        };
        let refs = vec![obj_ref];
        let metadata = vec![5u8; 55];
        let expiry = None;

        let commit = Commit::new(
            author_privkey,
            author_pubkey,
            seq,
            branch,
            deps,
            acks,
            refs,
            metadata,
            body_ref,
            expiry,
        )
        .unwrap();
        //log_debug!("commit: {}", commit.id().unwrap());
        add_obj(
            ObjectContent::Commit(commit),
            obj_deps,
            expiry,
            repo_pubkey,
            repo_secret,
            store,
        )
    }

    fn add_body_branch(
        branch: Branch,
        repo_pubkey: PubKey,
        repo_secret: SymKey,
        store: &mut impl BlockStorage,
    ) -> ObjectRef {
        let deps = vec![];
        let expiry = None;
        let body = CommitBody::Branch(branch);
        //log_debug!("body: {:?}", body);
        add_obj(
            ObjectContent::CommitBody(body),
            deps,
            expiry,
            repo_pubkey,
            repo_secret,
            store,
        )
    }

    fn add_body_trans(
        deps: Vec<ObjectId>,
        repo_pubkey: PubKey,
        repo_secret: SymKey,
        store: &mut impl BlockStorage,
    ) -> ObjectRef {
        let expiry = None;
        let content = [7u8; 777].to_vec();
        let body = CommitBody::Transaction(Transaction::V0(content));
        //log_debug!("body: {:?}", body);
        add_obj(
            ObjectContent::CommitBody(body),
            deps,
            expiry,
            repo_pubkey,
            repo_secret,
            store,
        )
    }

    fn add_body_ack(
        deps: Vec<ObjectId>,
        repo_pubkey: PubKey,
        repo_secret: SymKey,
        store: &mut impl BlockStorage,
    ) -> ObjectRef {
        let expiry = None;
        let body = CommitBody::Ack(Ack::V0());
        //log_debug!("body: {:?}", body);
        add_obj(
            ObjectContent::CommitBody(body),
            deps,
            expiry,
            repo_pubkey,
            repo_secret,
            store,
        )
    }

    let mut store = HashMapBlockStorage::new();
    let mut rng = OsRng {};

    // repo

    let repo_keypair: Keypair = Keypair::generate(&mut rng);
    // log_debug!(
    //     "repo private key: ({}) {:?}",
    //     repo_keypair.secret.as_bytes().len(),
    //     repo_keypair.secret.as_bytes()
    // );
    // log_debug!(
    //     "repo public key: ({}) {:?}",
    //     repo_keypair.public.as_bytes().len(),
    //     repo_keypair.public.as_bytes()
    // );
    let _repo_privkey = PrivKey::Ed25519PrivKey(repo_keypair.secret.to_bytes());
    let repo_pubkey = PubKey::Ed25519PubKey(repo_keypair.public.to_bytes());
    let repo_secret = SymKey::ChaCha20Key([9; 32]);

    let repolink = RepoLink::V0(RepoLinkV0 {
        id: repo_pubkey,
        secret: repo_secret,
        peers: vec![],
    });

    // branch

    let branch_keypair: Keypair = Keypair::generate(&mut rng);
    //log_debug!("branch public key: {:?}", branch_keypair.public.as_bytes());
    let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes());

    let member_keypair: Keypair = Keypair::generate(&mut rng);
    //log_debug!("member public key: {:?}", member_keypair.public.as_bytes());
    let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes());
    let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes());

    let metadata = [66u8; 64].to_vec();
    let commit_types = vec![CommitType::Ack, CommitType::Transaction];
    let secret = SymKey::ChaCha20Key([0; 32]);

    let member = MemberV0::new(member_pubkey, commit_types, metadata.clone());
    let members = vec![member];
    let mut quorum = HashMap::new();
    quorum.insert(CommitType::Transaction, 3);
    let ack_delay = RelTime::Minutes(3);
    let tags = [99u8; 32].to_vec();
    let branch = Branch::new(
        branch_pubkey,
        branch_pubkey,
        secret,
        members,
        quorum,
        ack_delay,
        tags,
        metadata,
    );
    //log_debug!("branch: {:?}", branch);

    log_debug!("branch deps/acks:");
    log_debug!("");
    log_debug!("     br");
    log_debug!("    /  \\");
    log_debug!("  t1    t2");
    log_debug!("  / \\  / \\");
    log_debug!(" a3  t4<--t5-->(t1)");
    log_debug!("     / \\");
    log_debug!("   a6   a7");
    log_debug!("");

    // commit bodies

    let branch_body = add_body_branch(
        branch.clone(),
        repo_pubkey.clone(),
        repo_secret.clone(),
        &mut store,
    );
    let ack_body = add_body_ack(vec![], repo_pubkey, repo_secret, &mut store);
    let trans_body = add_body_trans(vec![], repo_pubkey, repo_secret, &mut store);

    // create & add commits to store

    log_debug!(">> br");
    let br = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        0,
        vec![],
        vec![],
        branch_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!(">> t1");
    let t1 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        1,
        vec![br],
        vec![],
        trans_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!(">> t2");
    let t2 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        2,
        vec![br],
        vec![],
        trans_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!(">> a3");
    let a3 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        3,
        vec![t1],
        vec![],
        ack_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!(">> t4");
    let t4 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        4,
        vec![t2],
        vec![t1],
        trans_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!(">> t5");
    let t5 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        5,
        vec![t1, t2],
        vec![t4],
        trans_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!(">> a6");
    let a6 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        6,
        vec![t4],
        vec![],
        ack_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!(">> a7");
    let a7 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        7,
        vec![t4],
        vec![],
        ack_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    let mut public_overlay_cnx = cnx
        .overlay_connect(&repolink, true)
        .await
        .expect("overlay_connect failed");

    // Sending everything to the broker
    for v in store.get_all() {
        //log_debug!("SENDING {}", k);
        let _ = public_overlay_cnx
            .put_block(&v)
            .await
            .expect("put_block failed");
    }

    // Now emptying the local store of the client, and adding only 1 commit into it (br)
    // we also have received a commit (t5) but we don't know what to do with it...
    let mut store = HashMapBlockStorage::new();

    let br = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        0,
        vec![],
        vec![],
        branch_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    let t5 = add_commit(
        branch_body,
        member_privkey,
        member_pubkey,
        5,
        vec![t1, t2],
        vec![t4],
        trans_body,
        repo_pubkey,
        repo_secret,
        &mut store,
    );

    log_debug!("LOCAL STORE HAS {} BLOCKS", store.get_len());

    // Let's pretend that we know that the head of the branch in the broker is at commits a6 and a7.
    // normally it would be the pub/sub that notifies us of those heads.
    // now we want to synchronize with the broker.

    let mut filter = Filter::new(FilterBuilder::new(10, 0.01));
    for commit_ref in [br, t5] {
        match commit_ref.id {
            ObjectId::Blake3Digest32(d) => filter.add(&d),
        }
    }
    let cfg = filter.config();

    let known_commits = BloomFilter {
        k: cfg.hashes,
        f: filter.get_u8_array().to_vec(),
    };

    let known_heads = [br.id];

    let remote_heads = [a6.id, a7.id];

    let mut synced_blocks_stream = public_overlay_cnx
        .sync_branch(remote_heads.to_vec(), known_heads.to_vec(), known_commits)
        .await
        .expect("sync_branch failed");

    let mut i = 0;
    while let Some(b) = synced_blocks_stream.next().await {
        log_debug!("GOT BLOCK {}", b.id());
        store.put(&b);
        i += 1;
    }

    log_debug!("SYNCED {} BLOCKS", i);

    log_debug!("LOCAL STORE HAS {} BLOCKS", store.get_len());

    // now the client can verify the DAG and each commit. Then update its list of heads.
}

async fn test(
    cnx: &mut impl BrokerConnection,
    pub_key: PubKey,
    priv_key: PrivKey,
) -> Result<(), ProtocolError> {
    cnx.add_user(PubKey::Ed25519PubKey([1; 32]), priv_key)
        .await?;

    cnx.add_user(pub_key, priv_key).await?;
    //.expect("add_user 2 (myself) failed");

    assert_eq!(
        cnx.add_user(PubKey::Ed25519PubKey([1; 32]), priv_key)
            .await
            .err()
            .unwrap(),
        ProtocolError::UserAlreadyExists
    );

    let repo = RepoLink::V0(RepoLinkV0 {
        id: PubKey::Ed25519PubKey([1; 32]),
        secret: SymKey::ChaCha20Key([0; 32]),
        peers: vec![],
    });
    let mut public_overlay_cnx = cnx.overlay_connect(&repo, true).await?;

    log_debug!("put_block");

    let my_block_id = public_overlay_cnx
        .put_block(&Block::new(
            vec![],
            ObjectDeps::ObjectIdList(vec![]),
            None,
            vec![27; 150],
            None,
        ))
        .await?;

    log_debug!("added block_id to store {}", my_block_id);

    let object_id = public_overlay_cnx
        .put_object(
            ObjectContent::File(File::V0(FileV0 {
                content_type: vec![],
                metadata: vec![],
                content: vec![48; 69000],
            })),
            vec![],
            None,
            block_size(),
            repo.id(),
            repo.secret(),
        )
        .await?;

    log_debug!("added object_id to store {}", object_id);

    let mut my_block_stream = public_overlay_cnx
        .get_block(my_block_id, true, None)
        .await?;
    //.expect("get_block failed");

    while let Some(b) = my_block_stream.next().await {
        log_debug!("GOT BLOCK {}", b.id());
    }

    let mut my_object_stream = public_overlay_cnx.get_block(object_id, true, None).await?;
    //.expect("get_block for object failed");

    while let Some(b) = my_object_stream.next().await {
        log_debug!("GOT BLOCK {}", b.id());
    }

    let object = public_overlay_cnx.get_object(object_id, None).await?;
    //.expect("get_object failed");

    log_debug!("GOT OBJECT with ID {}", object.id());

    // let object_id = public_overlay_cnx
    //     .copy_object(object_id, Some(now_timestamp() + 60))
    //     .await
    //     .expect("copy_object failed");

    // log_debug!("COPIED OBJECT to OBJECT ID {}", object_id);

    public_overlay_cnx.delete_object(object_id).await?;
    //.expect("delete_object failed");

    let res = public_overlay_cnx
        .get_object(object_id, None)
        .await
        .unwrap_err();

    log_debug!("result from get object after delete: {}", res);
    assert_eq!(res, ProtocolError::NotFound);

    //TODO test pin/unpin

    // TEST BRANCH SYNC

    test_sync(cnx, pub_key, priv_key).await;

    Ok(())
}

async fn test_local_connection() {
    log_debug!("===== TESTING LOCAL API =====");

    let root = tempfile::Builder::new().prefix("ngcli").tempdir().unwrap();
    let master_key: [u8; 32] = [0; 32];
    std::fs::create_dir_all(root.path()).unwrap();
    log_debug!("{}", root.path().to_str().unwrap());
    let store = LmdbKCVStorage::open(root.path(), master_key);

    //let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker");

    let (priv_key, pub_key) = generate_keypair();

    // let mut cnx = server.local_connection(pub_key);

    // test(&mut cnx, pub_key, priv_key).await;
}

async fn test_remote_connection(url: &str) {
    log_debug!("===== TESTING REMOTE API =====");

    let (priv_key, pub_key) = generate_keypair();

    // open cnx

    // test(&mut cnx, pub_key, priv_key).await;
}

#[cfg(test)]
mod test {

    use crate::{test_local_connection, test_remote_connection};

    #[async_std::test]
    pub async fn test_local_cnx() {}

    use async_std::task;
    use ng_broker::server_ws::*;
    use ng_net::utils::gen_dh_keys;
    use ng_net::WS_PORT;
    use ng_repo::log::*;
    use ng_repo::types::PubKey;

    #[async_std::test]
    pub async fn test_remote_cnx() -> Result<(), Box<dyn std::error::Error>> {
        let keys = gen_dh_keys();
        // log_debug!("Public key of node: {:?}", keys.1);
        // log_debug!("Private key of node: {:?}", keys.0.as_slice());

        log_debug!("Public key of node: {}", keys.1);
        log_debug!("Private key of node: {}", keys.0);

        let thr = task::spawn(run_server_accept_one("127.0.0.1", WS_PORT, keys.0, pubkey));

        // time for the server to start
        std::thread::sleep(std::time::Duration::from_secs(2));

        test_remote_connection("ws://127.0.0.1:3012");

        thr.await;

        Ok(())
    }
}