@@ -10,6 +10,9 @@
 //! Repo object (on heap) to handle a Repository

 use crate::types::*;
+use crate::user_storage::repo::RepoStorage;
+use ng_net::actor::SoS;
+use ng_net::broker::{Broker, BROKER};
 use ng_repo::log::*;
 use ng_repo::object::Object;
 use ng_repo::{
@@ -32,7 +35,7 @@ use core::fmt;
 #[cfg(not(target_family = "wasm"))]
 use crate::rocksdb_user_storage::RocksDbUserStorage;
 use crate::user_storage::{InMemoryUserStorage, UserStorage};
-use async_std::sync::Mutex;
+use async_std::sync::{Mutex, RwLockWriteGuard};
 use std::{collections::HashMap, path::PathBuf, sync::Arc};

 use ng_net::{
@@ -50,7 +53,7 @@ pub struct Verifier {
     pub config: VerifierConfig,
     pub connected_server_id: Option<PubKey>,
     graph_dataset: Option<oxigraph::store::Store>,
-    user_storage: Option<Box<dyn UserStorage>>,
+    user_storage: Option<Arc<Box<dyn UserStorage>>>,
     block_storage: Option<Arc<std::sync::RwLock<dyn BlockStorage + Send + Sync>>>,
     last_seq_num: u64,
     peer_id: PubKey,
@@ -58,8 +61,9 @@ pub struct Verifier {
     last_reservation: SystemTime,
     stores: HashMap<OverlayId, Arc<Store>>,
     repos: HashMap<RepoId, Repo>,
+    topics: HashMap<(OverlayId, TopicId), (RepoId, BranchId)>,
     /// only used for InMemory type, to store the outbox
-    in_memory_outbox: Vec<Event>,
+    in_memory_outbox: Vec<EventOutboxStorage>,
 }

 impl fmt::Debug for Verifier {
@@ -69,6 +73,12 @@ impl fmt::Debug for Verifier {
     }
 }

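+/// An event waiting in the outbox, together with the overlay it must be published to.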
+#[derive(Clone, Debug, Serialize, Deserialize)]
+struct EventOutboxStorage {
+    event: Event,
+    overlay: OverlayId,
+}
+
 impl Verifier {
     #[allow(deprecated)]
     #[cfg(any(test, feature = "testing"))]
@@ -96,14 +106,44 @@ impl Verifier {
             last_reservation: SystemTime::UNIX_EPOCH,
             stores: HashMap::new(),
             repos: HashMap::new(),
+            topics: HashMap::new(),
             in_memory_outbox: vec![],
         }
     }

-    pub fn get_store_mut(&mut self, store_repo: &StoreRepo) -> Arc<Store> {
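+    /// Loads all the stores and repos found in persistent user storage into memory.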
+    pub fn load(&mut self) -> Result<(), NgError> {
+        if self.is_persistent() && self.user_storage.is_some() && self.block_storage.is_some() {
+            let user_storage = Arc::clone(self.user_storage.as_ref().unwrap());
+            let stores = user_storage.get_all_store_and_repo_ids()?;
+            for (store, repos) in stores.iter() {
+                let repo = user_storage
+                    .load_store(store, Arc::clone(self.block_storage.as_ref().unwrap()))?;
+                self.stores.insert(
+                    store.overlay_id_for_storage_purpose(),
+                    Arc::clone(&repo.store),
+                );
+                let store = Arc::clone(&repo.store);
+                self.add_repo_without_saving(repo);
+                for repo_id in repos {
+                    let repo = user_storage.load_repo(repo_id, Arc::clone(&store))?;
+                    self.add_repo_without_saving(repo);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn is_persistent(&self) -> bool {
+        self.config.config_type.is_persistent()
+    }
+
+    pub fn get_store_or_load(&mut self, store_repo: &StoreRepo) -> Arc<Store> {
         let overlay_id = store_repo.overlay_id_for_storage_purpose();
         let store = self.stores.entry(overlay_id).or_insert_with(|| {
-            // FIXME: get store_readcap from user storage
+            // FIXME: get store_readcap and store_overlay_branch_readcap from user storage
             let store_readcap = ReadCap::nil();
+            let store_overlay_branch_readcap = ReadCap::nil();
             let store = Store::new(
@@ -115,7 +155,7 @@ impl Verifier {
                     .block_storage
                     .as_ref()
                     .ok_or(core::fmt::Error)
-                    .expect("get_store_mut cannot be called on Remote Verifier"),
+                    .expect("get_store_or_load cannot be called on Remote Verifier"),
             ),
         );
         Arc::new(store)
@@ -127,8 +167,6 @@ impl Verifier {
         &mut self,
         store_repo: &StoreRepo,
         mut repo: Repo,
-        //read_cap: &ReadCap,
-        //overlay_read_cap: Option<&ReadCap>,
     ) -> Result<Repo, NgError> {
         let read_cap = repo.read_cap.to_owned().unwrap();
         let overlay_read_cap = repo.overlay_branch_read_cap().cloned();
@@ -152,6 +190,7 @@ impl Verifier {
         mut_store.set_read_caps(read_cap, overlay_read_cap);
         let new_store = Arc::new(mut_store);
         let _ = self.stores.insert(overlay_id, Arc::clone(&new_store));
+        // TODO: store in user_storage
         repo.store = new_store;
         //let _ = self.repos.insert(*store_repo.repo_id(), repo);
         Ok(repo)
@@ -197,17 +236,14 @@ impl Verifier {
         if self.stores.contains_key(&overlay_id) {
             return;
         }
+        // TODO: store in user_storage
         self.stores.insert(overlay_id, store);
     }

-    pub(crate) fn new_event(
+    pub(crate) async fn new_event(
         &mut self,
-        //publisher: &PrivKey,
-        //seq: &mut u64,
         commit: &Commit,
         additional_blocks: &Vec<BlockId>,
-        //topic_id: TopicId,
-        //topic_priv_key: &BranchWriteCapSecret,
         repo_id: RepoId,
         store_repo: &StoreRepo,
     ) -> Result<(), NgError> {
@ -215,53 +251,40 @@ impl Verifier {
self . reserve_more ( 1 ) ? ;
}
self . new_event_ ( commit , additional_blocks , repo_id , store_repo )
. await
}
fn new_event_ (
async fn new_event_ (
& mut self ,
//publisher: &PrivKey,
//seq: &mut u64,
commit : & Commit ,
additional_blocks : & Vec < BlockId > ,
//topic_id: TopicId,
//topic_priv_key: &BranchWriteCapSecret,
// store: &Store, // store could be omitted and a store repo ID would be given instead.
repo_id : RepoId ,
store_repo : & StoreRepo ,
) -> Result < ( ) , NgError > {
//let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
//let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId)
//self.get_store(store_repo)
let publisher = self . config . peer_priv_key . clone ( ) ;
self . last_seq_num + = 1 ;
let seq_num = self . last_seq_num ;
let repo = self . get_repo ( repo_id , store_repo ) ? ;
let event = Event ::new ( & publisher , seq_num , commit , additional_blocks , repo ) ? ;
self . send_or_save_event_to_outbox ( event ) ? ;
self . send_or_save_event_to_outbox ( event , repo . store . overlay_id )
. await ? ;
Ok ( ( ) )
}
fn new_event_with_repo_ (
async fn new_event_with_repo_ (
& mut self ,
//publisher: &PrivKey,
//seq: &mut u64,
commit : & Commit ,
additional_blocks : & Vec < BlockId > ,
//topic_id: TopicId,
//topic_priv_key: &BranchWriteCapSecret,
// store: &Store, // store could be omitted and a store repo ID would be given instead.
repo : & Repo ,
) -> Result < ( ) , NgError > {
//let topic_id = TopicId::nil(); // should be fetched from user storage, based on the Commit.branch
//let topic_priv_key = BranchWriteCapSecret::nil(); // should be fetched from user storage, based on repoId found in user storage (search by branchId)
//self.get_store(store_repo)
let publisher = self . config . peer_priv_key . clone ( ) ;
self . last_seq_num + = 1 ;
let seq_num = self . last_seq_num ;
let event = Event ::new ( & publisher , seq_num , commit , additional_blocks , repo ) ? ;
self . send_or_save_event_to_outbox ( event ) ? ;
self . send_or_save_event_to_outbox ( event , repo . store . overlay_id )
. await ? ;
Ok ( ( ) )
}
@@ -273,7 +296,7 @@ impl Verifier {
         Ok(self.last_seq_num)
     }

-    pub(crate) fn new_events_with_repo(
+    pub(crate) async fn new_events_with_repo(
         &mut self,
         events: Vec<(Commit, Vec<Digest>)>,
         repo: &Repo,
@@ -285,12 +308,12 @@ impl Verifier {
             self.reserve_more(missing_count as u64 + 1)?;
         }
         for event in events {
-            self.new_event_with_repo_(&event.0, &event.1, repo)?;
+            self.new_event_with_repo_(&event.0, &event.1, repo).await?;
         }
         Ok(())
     }

-    pub(crate) fn new_events(
+    pub(crate) async fn new_events(
         &mut self,
         events: Vec<(Commit, Vec<Digest>)>,
         repo_id: RepoId,
@@ -303,7 +326,8 @@ impl Verifier {
             self.reserve_more(missing_count as u64 + 1)?;
         }
         for event in events {
-            self.new_event_(&event.0, &event.1, repo_id.clone(), store_repo)?;
+            self.new_event_(&event.0, &event.1, repo_id.clone(), store_repo)
+                .await?;
         }
         Ok(())
     }
@@ -325,23 +349,107 @@ impl Verifier {
         self.take_some_peer_last_seq_numbers(max(at_least as u16, qty))
     }

-    fn send_or_save_event_to_outbox<'a>(&'a mut self, event: Event) -> Result<(), NgError> {
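+    /// Drains the outbox (JS session, RocksDb file, or in-memory vector) and returns the stored events.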
+    fn take_events_from_outbox(&mut self) -> Result<Vec<EventOutboxStorage>, NgError> {
+        match &self.config.config_type {
+            VerifierConfigType::JsSaveSession(js) => {
+                let events_ser = (js.outbox_read_function)(self.peer_id)?;
+                let mut res = Vec::with_capacity(events_ser.len());
+                for event_ser in events_ser {
+                    let event = serde_bare::from_slice(&event_ser)?;
+                    res.push(event);
+                }
+                Ok(res)
+            }
+            VerifierConfigType::RocksDb(path) => {
+                let mut path = path.clone();
+                path.push(format!("outbox{}", self.peer_id.to_hash_string()));
+                let file = read(path.clone());
+                let mut res = vec![];
+                match file {
+                    Ok(ser) => {
+                        if ser.len() > 0 {
+                            let mut pos: usize = 0;
+                            let usize_size = usize::BITS as usize / 8;
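+                            // each record is framed as a usize length prefix (little-endian),
+                            // followed by a serde_bare-encoded EventOutboxStorage of that length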
+                            loop {
+                                let size = usize::from_le_bytes(
+                                    ser[pos..pos + usize_size]
+                                        .try_into()
+                                        .map_err(|_| NgError::SerializationError)?,
+                                );
+                                //log_info!("size={}", size);
+                                pos += usize_size;
+                                //let buff = &ser[pos..pos + size];
+                                //log_info!("EVENT={:?}", buff.len());
+                                let event = serde_bare::from_slice(&ser[pos..pos + size])?;
+                                //log_info!("EVENT_DESER={:?}", event);
+                                res.push(event);
+                                pos += size;
+                                if pos >= ser.len() {
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                    Err(_) => {}
+                }
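+                // the outbox file has been consumed: remove it so its events cannot be replayed twice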
+                let _ = std::fs::remove_file(path);
+                Ok(res)
+            }
+            VerifierConfigType::Memory => {
+                let res = self.in_memory_outbox.drain(..).collect();
+                Ok(res)
+            }
+            _ => unimplemented!(),
+        }
+    }
+
+    async fn send_or_save_event_to_outbox<'a>(
+        &'a mut self,
+        event: Event,
+        overlay: OverlayId,
+    ) -> Result<(), NgError> {
         log_debug!("========== EVENT {:03}: {}", event.seq_num(), event);

         if self.connected_server_id.is_some() {
-            // send the events to the server already
+            // send the event to the server already
+            let broker = BROKER.write().await;
+            let user = self.config.user_priv_key.to_pub();
+            let remote = self.connected_server_id.as_ref().unwrap().to_owned();
+            self.send_event(event, &broker, &user, &remote, overlay)
+                .await?;
         } else {
             match &self.config.config_type {
                 VerifierConfigType::JsSaveSession(js) => {
+                    let e = EventOutboxStorage { event, overlay };
                     (js.outbox_write_function)(
                         self.peer_id,
-                        event.seq_num(),
-                        serde_bare::to_vec(&event)?,
+                        e.event.seq_num(),
+                        serde_bare::to_vec(&e)?,
                     )?;
                 }
-                VerifierConfigType::RocksDb(path) => {}
+                VerifierConfigType::RocksDb(path) => {
+                    let mut path = path.clone();
+                    std::fs::create_dir_all(path.clone()).unwrap();
+                    path.push(format!("outbox{}", self.peer_id.to_hash_string()));
+                    let mut file = OpenOptions::new()
+                        .append(true)
+                        .create(true)
+                        .open(path)
+                        .map_err(|_| NgError::IoError)?;
+                    let e = EventOutboxStorage { event, overlay };
+                    let event_ser = serde_bare::to_vec(&e)?;
+                    //log_info!("EVENT size={}", event_ser.len());
+                    //log_info!("EVENT {:?}", event_ser);
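+                    // write the length prefix first, then the payload: the same framing
+                    // that take_events_from_outbox parses when draining the outbox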
+                    let size_ser = event_ser.len().to_le_bytes().to_vec();
+                    file.write_all(&size_ser).map_err(|_| NgError::IoError)?;
+                    file.flush().map_err(|_| NgError::IoError)?;
+                    file.write_all(&event_ser).map_err(|_| NgError::IoError)?;
+                    file.flush().map_err(|_| NgError::IoError)?;
+                    file.sync_data().map_err(|_| NgError::IoError)?;
+                }
                 VerifierConfigType::Memory => {
-                    self.in_memory_outbox.push(event);
+                    self.in_memory_outbox
+                        .push(EventOutboxStorage { event, overlay });
                 }
                 _ => unimplemented!(),
             }
@@ -349,6 +457,107 @@ impl Verifier {
         }
         Ok(())
     }

+    async fn send_event<'a>(
+        &mut self,
+        event: Event,
+        broker: &RwLockWriteGuard<'a, Broker<'a>>,
+        user: &UserId,
+        remote: &DirectPeerId,
+        overlay: OverlayId,
+    ) -> Result<(), NgError> {
+        let (repo_id, branch_id) = self
+            .topics
+            .get(&(overlay, *event.topic_id()))
+            .ok_or(NgError::TopicNotFound)?
+            .to_owned();
+        let opened_as_publisher;
+        {
+            let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
+            opened_as_publisher = repo.branch_is_opened_as_publisher(&branch_id);
+        }
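+        // if the branch is not yet opened as publisher on the broker, first check the
+        // repo pin status, then pin the repo and/or subscribe to the topic as needed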
+        if !opened_as_publisher {
+            let msg = RepoPinStatusReq::V0(RepoPinStatusReqV0 {
+                hash: repo_id.into(),
+                overlay: Some(overlay),
+            });
+            match broker
+                .request::<RepoPinStatusReq, RepoPinStatus>(user, remote, msg)
+                .await
+            {
+                Err(ProtocolError::False) | Err(ProtocolError::RepoAlreadyOpened) => {
+                    // pinning the repo on the server broker
+                    let pin_req;
+                    {
+                        let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
+                        pin_req = PinRepo::from_repo(repo, remote);
+                    }
+                    if let Ok(SoS::Single(opened)) = broker
+                        .request::<PinRepo, RepoOpened>(user, remote, pin_req)
+                        .await
+                    {
+                        self.repo_was_opened(&repo_id, &opened)?;
+                        //TODO: check that in the returned opened_repo, the branch we are interested in has effectively been subscribed as publisher by the broker.
+                    } else {
+                        return Err(NgError::ProtocolError);
+                    }
+                }
+                Err(_) => return Err(NgError::ProtocolError),
+                Ok(SoS::Single(pin_status)) => {
+                    // checking that the branch is subscribed as publisher
+                    if !pin_status.is_topic_subscribed_as_publisher(event.topic_id()) {
+                        // we need to subscribe as publisher
+                        let topic_sub;
+                        {
+                            let repo = self.repos.get(&repo_id).ok_or(NgError::RepoNotFound)?;
+                            let branch_info = repo.branch(&branch_id)?;
+                            if branch_info.topic_priv_key.is_none() {
+                                return Err(NgError::PermissionDenied);
+                            }
+                            topic_sub = TopicSub::new(repo, branch_info, Some(remote));
+                        }
+                        match broker
+                            .request::<TopicSub, TopicSubRes>(user, remote, topic_sub)
+                            .await
+                        {
+                            Ok(_) => {
+                                // TODO, deal with heads
+                                // update Repo locally
+                                let repo =
+                                    self.repos.get_mut(&repo_id).ok_or(NgError::RepoNotFound)?;
+                                repo.opened_branches.insert(*event.topic_id(), true);
+                            }
+                            Err(_) => {
+                                return Err(NgError::BrokerError);
+                            }
+                        }
+                    }
+                }
+                _ => return Err(NgError::ActorError),
+            }
+            // TODO: deal with received known_heads.
+            // DO a TopicSync
+        }
+        Ok(())
+    }
+
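+    /// Sends every event that was saved to the outbox while offline to the now-connected broker.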
+    pub async fn send_outbox(&mut self) -> Result<(), NgError> {
+        let events = self.take_events_from_outbox()?;
+        let broker = BROKER.write().await;
+        let user = self.config.user_priv_key.to_pub();
+        let remote = self
+            .connected_server_id
+            .as_ref()
+            .ok_or(NgError::NotConnected)?
+            .clone();
+        for e in events {
+            self.send_event(e.event, &broker, &user, &remote, e.overlay)
+                .await?;
+        }
+        Ok(())
+    }
+
     fn take_some_peer_last_seq_numbers(&mut self, qty: u16) -> Result<(), NgError> {
         match &self.config.config_type {
             VerifierConfigType::JsSaveSession(js) => {
@@ -358,7 +567,7 @@ impl Verifier {
             VerifierConfigType::RocksDb(path) => {
                 let mut path = path.clone();
                 std::fs::create_dir_all(path.clone()).unwrap();
-                path.push(format!("lastseq{}", self.peer_id.to_string()));
+                path.push(format!("lastseq{}", self.peer_id.to_hash_string()));
                 log_debug!("last_seq path {}", path.display());

                 let file = read(path.clone());
@ -376,26 +585,19 @@ impl Verifier {
OpenOptions ::new ( )
. write ( true )
. open ( path )
. map_err ( | _ | NgError ::Serialization Error) ? ,
. map_err ( | _ | NgError ::Io Error) ? ,
old_val ,
)
}
Err ( _ ) = > (
File ::create ( path ) . map_err ( | _ | NgError ::SerializationError ) ? ,
0 ,
) ,
Err ( _ ) = > ( File ::create ( path ) . map_err ( | _ | NgError ::IoError ) ? , 0 ) ,
} ;
if qty > 0 {
let new_val = val + qty as u64 ;
let spls = SessionPeerLastSeq ::V0 ( new_val ) ;
let ser = spls . ser ( ) ? ;
file_save
. write_all ( & ser )
. map_err ( | _ | NgError ::SerializationError ) ? ;
file_save . write_all ( & ser ) . map_err ( | _ | NgError ::IoError ) ? ;
file_save
. sync_data ( )
. map_err ( | _ | NgError ::SerializationError ) ? ;
file_save . sync_data ( ) . map_err ( | _ | NgError ::IoError ) ? ;
}
self . max_reserved_seq_num = val + qty as u64 ;
}
@@ -448,12 +650,11 @@ impl Verifier {
             _ => unimplemented!(), // can be WebRocksDb or RocksDb on wasm platforms
         };
         let peer_id = config.peer_priv_key.to_pub();
-        let should_load_last_seq_num = config.config_type.should_load_last_seq_num();
         let mut verif = Verifier {
             config,
             connected_server_id: None,
             graph_dataset: graph,
-            user_storage: user,
+            user_storage: user.map(|u| Arc::new(u)),
             block_storage: block,
             peer_id,
             last_reservation: SystemTime::UNIX_EPOCH, // this is to avoid reserving 100 seq_nums at every start of a new session
@@ -461,10 +662,11 @@ impl Verifier {
             last_seq_num: 0,
             stores: HashMap::new(),
             repos: HashMap::new(),
+            topics: HashMap::new(),
             in_memory_outbox: vec![],
         };
         // this is important as it will load the last seq from storage
-        if should_load_last_seq_num {
+        if verif.config.config_type.should_load_last_seq_num() {
             verif.take_some_peer_last_seq_numbers(0)?;
             verif.last_seq_num = verif.max_reserved_seq_num;
         }
@@ -487,7 +689,54 @@ impl Verifier {
         unimplemented!();
     }

-    pub fn new_store_default<'a>(
+    fn add_repo_without_saving(&mut self, repo: Repo) {
+        self.add_repo_(repo);
+    }
+
+    fn add_repo_and_save(&mut self, repo: Repo) -> &Repo {
+        let user_storage = self
+            .user_storage
+            .as_ref()
+            .map(|us| Arc::clone(us))
+            .and_then(|u| if self.is_persistent() { Some(u) } else { None });
+        let repo_ref = self.add_repo_(repo);
+        // save in user_storage
+        if user_storage.is_some() {
+            let _ = user_storage.unwrap().save_repo(repo_ref);
+        }
+        repo_ref
+    }
+
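+    /// Indexes every branch topic of the repo in the topics map, then inserts the repo itself.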
+    fn add_repo_(&mut self, repo: Repo) -> &Repo {
+        for (branch_id, info) in repo.branches.iter() {
+            let overlay_id = repo.store.overlay_id.clone();
+            let topic_id = info.topic.clone();
+            let repo_id = repo.id.clone();
+            let branch_id = branch_id.clone();
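+            // an (overlay, topic) pair must map to exactly one branch,
+            // so a pre-existing entry here would be a bug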
+            assert_eq!(
+                self.topics
+                    .insert((overlay_id, topic_id), (repo_id, branch_id)),
+                None
+            );
+        }
+        let repo_ref = self.repos.entry(repo.id).or_insert(repo);
+        repo_ref
+    }
+
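+    /// Records, for each branch opened by the broker, whether we are subscribed to its topic as publisher.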
+    fn repo_was_opened(
+        &mut self,
+        repo_id: &RepoId,
+        opened_repo: &RepoOpened,
+    ) -> Result<(), NgError> {
+        let repo = self.repos.get_mut(repo_id).ok_or(NgError::RepoNotFound)?;
+        for sub in opened_repo {
+            repo.opened_branches
+                .insert(*sub.topic_id(), sub.is_publisher());
+        }
+        Ok(())
+    }
+
+    pub async fn new_store_default<'a>(
         &'a mut self,
         creator: &UserId,
         creator_priv_key: &PrivKey,
@@ -513,7 +762,7 @@ impl Verifier {
                     .block_storage
                     .as_ref()
                     .ok_or(core::fmt::Error)
-                    .expect("get_store_mut cannot be called on Remote Verifier"),
+                    .expect("get_store_or_load cannot be called on Remote Verifier"),
             ),
         );
         Arc::new(store)
@@ -523,29 +772,25 @@ impl Verifier {
             creator_priv_key,
             repo_write_cap_secret,
         )?;
-        self.new_events_with_repo(proto_events, &repo)?;
+        self.new_events_with_repo(proto_events, &repo).await?;
         let repo = self.complete_site_store(store_repo, repo)?;
-        let repo_ref = self.repos.entry(repo.id).or_insert(repo);
+        let repo_ref = self.add_repo_and_save(repo);
         Ok(repo_ref)
     }

     /// returns the Repo and the last seq_num of the peer
-    pub fn new_repo_default<'a>(
+    pub async fn new_repo_default<'a>(
         &'a mut self,
         creator: &UserId,
         creator_priv_key: &PrivKey,
         store_repo: &StoreRepo,
     ) -> Result<&'a Repo, NgError> {
-        let store = self.get_store_mut(store_repo);
+        let store = self.get_store_or_load(store_repo);
         let repo_write_cap_secret = SymKey::random();
         let (repo, proto_events) =
             store.create_repo_default(creator, creator_priv_key, repo_write_cap_secret)?;
-        self.new_events_with_repo(proto_events, &repo)?;
-        // let mut events = vec![];
-        // for event in proto_events {
-        //     events.push(self.new_event(&event.0, &event.1, &repo.store)?);
-        // }
-        let repo_ref = self.repos.entry(repo.id).or_insert(repo);
+        self.new_events_with_repo(proto_events, &repo).await?;
+        let repo_ref = self.add_repo_and_save(repo);
         Ok(repo_ref)
     }
 }
@@ -557,8 +802,8 @@ mod test {
     use ng_repo::log::*;
     use ng_repo::store::Store;

-    #[test]
-    pub fn test_new_repo_default() {
+    #[async_std::test]
+    pub async fn test_new_repo_default() {
         let (creator_priv_key, creator_pub_key) = generate_keypair();
         let (publisher_privkey, publisher_pubkey) = generate_keypair();
@@ -571,6 +816,7 @@ mod test {
         let repo = verifier
             .new_repo_default(&creator_pub_key, &creator_priv_key, &store_repo)
+            .await
             .expect("new_default");

         log_debug!("REPO OBJECT {}", repo);