diff --git a/Cargo.lock b/Cargo.lock index 6f0f317..6d714a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3385,7 +3385,7 @@ dependencies = [ [[package]] name = "ng-oxigraph" -version = "0.4.0-alpha.7-ngpreview5" +version = "0.4.0-alpha.7-ngpreview6" dependencies = [ "codspeed-criterion-compat", "digest 0.10.7", @@ -3446,8 +3446,8 @@ dependencies = [ [[package]] name = "ng-rocksdb" -version = "0.21.0-ngpreview.2" -source = "git+https://git.nextgraph.org/NextGraph/rust-rocksdb.git?branch=master#8965930ecd738d70a6cd1d3c9e406d2d9b8e902f" +version = "0.21.0-ngpreview.3" +source = "git+https://git.nextgraph.org/NextGraph/rust-rocksdb.git?branch=master#87f6cc1ec0bd265025d8afbee565e129aa0c2273" dependencies = [ "bindgen", "bzip2-sys", @@ -3487,7 +3487,7 @@ dependencies = [ [[package]] name = "ng-storage-rocksdb" -version = "0.1.0-preview.5" +version = "0.1.0-preview.6" dependencies = [ "ng-repo", "ng-rocksdb", diff --git a/ng-oxigraph/Cargo.toml b/ng-oxigraph/Cargo.toml index 4fcaecc..16336cd 100644 --- a/ng-oxigraph/Cargo.toml +++ b/ng-oxigraph/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ng-oxigraph" -version = "0.4.0-alpha.7-ngpreview5" +version = "0.4.0-alpha.7-ngpreview6" authors = ["Tpt ", "Niko PLP "] license = "MIT OR Apache-2.0" readme = "README.md" @@ -14,6 +14,7 @@ a SPARQL database and RDF toolkit. 
fork for NextGraph """ edition = "2021" rust-version = "1.70" +build = "build.rs" [features] default = ["rdf-star","sep-0002","sep-0006", "oxsdatatypes"] @@ -44,9 +45,9 @@ quick-xml = ">=0.29, <0.32" memchr = "2.5" peg = "0.8" -[target.'cfg(all(not(target_family = "wasm")))'.dependencies] +[target.'cfg(all(not(target_family = "wasm"),not(docsrs)))'.dependencies] libc = "0.2" -ng-rocksdb = { version = "0.21.0-ngpreview.2", git = "https://git.nextgraph.org/NextGraph/rust-rocksdb.git", branch = "master", features = [ ] } +ng-rocksdb = { version = "0.21.0-ngpreview.3", git = "https://git.nextgraph.org/NextGraph/rust-rocksdb.git", branch = "master", features = [ ] } [target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies] getrandom = "0.2.8" diff --git a/ng-oxigraph/build.rs b/ng-oxigraph/build.rs new file mode 100644 index 0000000..02716ec --- /dev/null +++ b/ng-oxigraph/build.rs @@ -0,0 +1,7 @@ +fn main() { + + if std::env::var("DOCS_RS").is_ok() { + println!("cargo:rustc-cfg=docsrs"); + } + +} \ No newline at end of file diff --git a/ng-oxigraph/src/oxigraph/storage/backend/mod.rs b/ng-oxigraph/src/oxigraph/storage/backend/mod.rs index b94eb65..1a29081 100644 --- a/ng-oxigraph/src/oxigraph/storage/backend/mod.rs +++ b/ng-oxigraph/src/oxigraph/storage/backend/mod.rs @@ -1,12 +1,12 @@ //! A storage backend //! 
RocksDB is available, if not in memory -#[cfg(any(target_family = "wasm"))] +#[cfg(any(target_family = "wasm",docsrs))] pub use fallback::{ColumnFamily, ColumnFamilyDefinition, Db, Iter, Reader, Transaction}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub use oxi_rocksdb::{ColumnFamily, ColumnFamilyDefinition, Db, Iter, Reader, Transaction}; -#[cfg(any(target_family = "wasm"))] +#[cfg(any(target_family = "wasm",docsrs))] mod fallback; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] mod oxi_rocksdb; diff --git a/ng-oxigraph/src/oxigraph/storage/binary_encoder.rs b/ng-oxigraph/src/oxigraph/storage/binary_encoder.rs index d1cf1ac..427bb7b 100644 --- a/ng-oxigraph/src/oxigraph/storage/binary_encoder.rs +++ b/ng-oxigraph/src/oxigraph/storage/binary_encoder.rs @@ -5,7 +5,7 @@ use crate::oxsdatatypes::*; use std::io::Read; use std::mem::size_of; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub const LATEST_STORAGE_VERSION: u64 = 1; pub const WRITTEN_TERM_MAX_SIZE: usize = size_of::() + 2 * size_of::(); diff --git a/ng-oxigraph/src/oxigraph/storage/mod.rs b/ng-oxigraph/src/oxigraph/storage/mod.rs index a20740e..e448490 100644 --- a/ng-oxigraph/src/oxigraph/storage/mod.rs +++ b/ng-oxigraph/src/oxigraph/storage/mod.rs @@ -1,9 +1,9 @@ #![allow(clippy::same_name_method)] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use crate::oxigraph::model::Quad; use crate::oxigraph::model::{GraphNameRef, NamedOrBlankNodeRef, QuadRef, TermRef}; use crate::oxigraph::storage::backend::{Reader, Transaction}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use crate::oxigraph::storage::binary_encoder::LATEST_STORAGE_VERSION; use crate::oxigraph::storage::binary_encoder::{ decode_term, encode_term, encode_term_pair, encode_term_quad, 
encode_term_triple, @@ -14,24 +14,24 @@ use crate::oxigraph::storage::binary_encoder::{ pub use crate::oxigraph::storage::error::{ CorruptionError, LoaderError, SerializerError, StorageError, }; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use crate::oxigraph::storage::numeric_encoder::Decoder; use crate::oxigraph::storage::numeric_encoder::{ insert_term, EncodedQuad, EncodedTerm, StrHash, StrLookup, }; use backend::{ColumnFamily, ColumnFamilyDefinition, Db, Iter}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::collections::VecDeque; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::collections::{HashMap, HashSet}; use std::error::Error; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::mem::{swap, take}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::path::{Path, PathBuf}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::sync::Mutex; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::{io, thread}; mod backend; @@ -51,16 +51,16 @@ const DSPO_CF: &str = "dspo"; const DPOS_CF: &str = "dpos"; const DOSP_CF: &str = "dosp"; const GRAPHS_CF: &str = "graphs"; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] const DEFAULT_CF: &str = "default"; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] const DEFAULT_BULK_LOAD_BATCH_SIZE: usize = 1_000_000; /// Low level storage primitives #[derive(Clone)] pub struct Storage { db: Db, - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] default_cf: ColumnFamily, id2str_cf: ColumnFamily, spog_cf: ColumnFamily, @@ -80,7 
+80,7 @@ impl Storage { Self::setup(Db::new(Self::column_families())?) } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn open(path: &Path, key: Option<[u8; 32]>) -> Result { Self::setup(Db::open_read_write( Some(path), @@ -89,7 +89,7 @@ impl Storage { )?) } - // #[cfg(all(not(target_family = "wasm")))] + // #[cfg(all(not(target_family = "wasm"),not(docsrs)))] // pub fn open_secondary(primary_path: &Path) -> Result { // Self::setup(Db::open_secondary( // primary_path, @@ -98,7 +98,7 @@ impl Storage { // )?) // } - // #[cfg(all(not(target_family = "wasm")))] + // #[cfg(all(not(target_family = "wasm"),not(docsrs)))] // pub fn open_persistent_secondary( // primary_path: &Path, // secondary_path: &Path, @@ -110,7 +110,7 @@ impl Storage { // )?) // } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn open_read_only(path: &Path, key: Option<[u8; 32]>) -> Result { Self::setup(Db::open_read_only(path, Self::column_families(), key)?) 
} @@ -188,7 +188,7 @@ impl Storage { fn setup(db: Db) -> Result { let this = Self { - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] default_cf: db.column_family(DEFAULT_CF)?, id2str_cf: db.column_family(ID2STR_CF)?, spog_cf: db.column_family(SPOG_CF)?, @@ -203,12 +203,12 @@ impl Storage { graphs_cf: db.column_family(GRAPHS_CF)?, db, }; - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] this.migrate()?; Ok(this) } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn migrate(&self) -> Result<(), StorageError> { let mut version = self.ensure_version()?; if version == 0 { @@ -248,7 +248,7 @@ impl Storage { } } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn ensure_version(&self) -> Result { Ok( if let Some(version) = self.db.get(&self.default_cf, b"oxversion")? { @@ -262,7 +262,7 @@ impl Storage { ) } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn update_version(&self, version: u64) -> Result<(), StorageError> { self.db .insert(&self.default_cf, b"oxversion", &version.to_be_bytes())?; @@ -289,12 +289,12 @@ impl Storage { }) } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn flush(&self) -> Result<(), StorageError> { self.db.flush() } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn compact(&self) -> Result<(), StorageError> { self.db.compact(&self.default_cf)?; self.db.compact(&self.gspo_cf)?; @@ -309,7 +309,7 @@ impl Storage { self.db.compact(&self.id2str_cf) } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn backup(&self, target_directory: &Path) -> Result<(), StorageError> { self.db.backup(target_directory) } @@ -634,7 +634,7 @@ impl StorageReader { } } 
- #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn get_str(&self, key: &StrHash) -> Result, StorageError> { Ok(self .storage @@ -645,7 +645,7 @@ impl StorageReader { .map_err(CorruptionError::new)?) } - #[cfg(any(target_family = "wasm"))] + #[cfg(any(target_family = "wasm",docsrs))] pub fn get_str(&self, key: &StrHash) -> Result, StorageError> { Ok(self .reader @@ -655,21 +655,21 @@ impl StorageReader { .map_err(CorruptionError::new)?) } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn contains_str(&self, key: &StrHash) -> Result { self.storage .db .contains_key(&self.storage.id2str_cf, &key.to_be_bytes()) } - #[cfg(any(target_family = "wasm"))] + #[cfg(any(target_family = "wasm",docsrs))] pub fn contains_str(&self, key: &StrHash) -> Result { self.reader .contains_key(&self.storage.id2str_cf, &key.to_be_bytes()) } /// Validates that all the storage invariants held in the data - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn validate(&self) -> Result<(), StorageError> { // triples let dspo_size = self.dspo_quads(&[]).count(); @@ -781,7 +781,7 @@ impl StorageReader { } /// Validates that all the storage invariants held in the data - #[cfg(any(target_family = "wasm"))] + #[cfg(any(target_family = "wasm",docsrs))] #[allow(clippy::unused_self, clippy::unnecessary_wraps)] pub fn validate(&self) -> Result<(), StorageError> { Ok(()) // TODO @@ -1005,7 +1005,7 @@ impl<'a> StorageWriter<'a> { } } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn insert_str(&mut self, key: &StrHash, value: &str) -> Result<(), StorageError> { if self .storage @@ -1021,7 +1021,7 @@ impl<'a> StorageWriter<'a> { ) } - #[cfg(any(target_family = "wasm"))] + #[cfg(any(target_family = "wasm",docsrs))] fn insert_str(&mut self, key: &StrHash, value: &str) -> Result<(), StorageError> { 
self.transaction.insert( &self.storage.id2str_cf, @@ -1186,7 +1186,7 @@ impl<'a> StorageWriter<'a> { } } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] #[must_use] pub struct StorageBulkLoader { storage: Storage, @@ -1195,7 +1195,7 @@ pub struct StorageBulkLoader { max_memory_size: Option, } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] impl StorageBulkLoader { pub fn new(storage: Storage) -> Self { Self { @@ -1326,7 +1326,7 @@ impl StorageBulkLoader { } } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] struct FileBulkLoader<'a> { storage: &'a Storage, id2str: HashMap>, @@ -1335,7 +1335,7 @@ struct FileBulkLoader<'a> { graphs: HashSet, } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] impl<'a> FileBulkLoader<'a> { fn new(storage: &'a Storage, batch_size: usize) -> Self { Self { @@ -1541,7 +1541,7 @@ impl<'a> FileBulkLoader<'a> { } } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn map_thread_result(result: thread::Result) -> io::Result { result.map_err(|e| { io::Error::new( diff --git a/ng-oxigraph/src/oxigraph/store.rs b/ng-oxigraph/src/oxigraph/store.rs index 310901e..acc4b42 100644 --- a/ng-oxigraph/src/oxigraph/store.rs +++ b/ng-oxigraph/src/oxigraph/store.rs @@ -25,7 +25,7 @@ //! }; //! # Result::<_, Box>::Ok(()) //! 
``` -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use super::io::RdfParseError; use super::io::{RdfFormat, RdfParser, RdfSerializer}; use super::model::*; @@ -34,7 +34,7 @@ use super::sparql::{ QueryResults, Update, UpdateOptions, }; use super::storage::numeric_encoder::{Decoder, EncodedQuad, EncodedTerm}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use super::storage::StorageBulkLoader; use super::storage::{ ChainedDecodingQuadIterator, DecodingGraphIterator, Storage, StorageReader, StorageWriter, @@ -42,7 +42,7 @@ use super::storage::{ pub use super::storage::{CorruptionError, LoaderError, SerializerError, StorageError}; use std::error::Error; use std::io::{Read, Write}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::path::Path; use std::{fmt, str}; @@ -100,14 +100,14 @@ impl Store { /// Only one read-write [`Store`] can exist at the same time. /// If you want to have extra [`Store`] instance opened on a same data /// use [`Store::open_read_only`]. - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn open(path: impl AsRef) -> Result { Ok(Self { storage: Storage::open(path.as_ref(), None)?, }) } - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn open_with_key(path: impl AsRef, key: [u8; 32]) -> Result { Ok(Self { storage: Storage::open(path.as_ref(), Some(key))?, @@ -124,7 +124,7 @@ impl Store { // /// If you prefer persistent storage use [`Store::open_persistent_secondary`]. // /// // /// If you want to simple read-only [`Store`] use [`Store::open_read_only`]. 
- // #[cfg(all(not(target_family = "wasm")))] + // #[cfg(all(not(target_family = "wasm"),not(docsrs)))] // pub fn open_secondary(primary_path: impl AsRef) -> Result { // Ok(Self { // storage: Storage::open_secondary(primary_path.as_ref())?, @@ -139,7 +139,7 @@ impl Store { /// `primary_path` must be the path of the primary instance and `secondary_path` an other directory for the secondary instance cache. /// /// If you want to simple read-only [`Store`] use [`Store::open_read_only`]. - // #[cfg(all(not(target_family = "wasm")))] + // #[cfg(all(not(target_family = "wasm"),not(docsrs)))] // pub fn open_persistent_secondary( // primary_path: impl AsRef, // secondary_path: impl AsRef, @@ -155,7 +155,7 @@ impl Store { /// Opens a read-only [`Store`] from disk. /// /// Opening as read-only while having an other process writing the database is undefined behavior. - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn open_read_only( path: impl AsRef, key: Option<[u8; 32]>, @@ -939,7 +939,7 @@ impl Store { /// Flushes all buffers and ensures that all writes are saved on disk. /// /// Flushes are automatically done using background threads but might lag a little bit. - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn flush(&self) -> Result<(), StorageError> { self.storage.flush() } @@ -949,7 +949,7 @@ impl Store { /// Useful to call after a batch upload or another similar operation. /// ///
Can take hours on huge databases.
- #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn optimize(&self) -> Result<(), StorageError> { self.storage.compact() } @@ -972,7 +972,7 @@ impl Store { /// This allows cheap regular backups. /// /// If you want to move your data to another RDF storage system, you should have a look at the [`Store::dump_to_write`] function instead. - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn backup(&self, target_directory: impl AsRef) -> Result<(), StorageError> { self.storage.backup(target_directory.as_ref()) } @@ -999,7 +999,7 @@ impl Store { /// assert!(store.contains(QuadRef::new(ex, ex, ex, ex))?); /// # Result::<_, Box>::Ok(()) /// ``` - #[cfg(all(not(target_family = "wasm")))] + #[cfg(all(not(target_family = "wasm"),not(docsrs)))] pub fn bulk_loader(&self) -> BulkLoader { BulkLoader { storage: StorageBulkLoader::new(self.storage.clone()), @@ -1617,14 +1617,14 @@ impl Iterator for GraphNameIter { /// assert!(store.contains(QuadRef::new(ex, ex, ex, ex))?); /// # Result::<_, Box>::Ok(()) /// ``` -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] #[must_use] pub struct BulkLoader { storage: StorageBulkLoader, on_parse_error: Option Result<(), RdfParseError>>>, } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] impl BulkLoader { /// Sets the maximal number of threads to be used by the bulk loader per operation. 
/// diff --git a/ng-oxigraph/tests/store.rs b/ng-oxigraph/tests/store.rs index 1477b6e..0f36aba 100644 --- a/ng-oxigraph/tests/store.rs +++ b/ng-oxigraph/tests/store.rs @@ -5,20 +5,20 @@ use ng_oxigraph::oxigraph::io::RdfFormat; use ng_oxigraph::oxigraph::model::vocab::{rdf, xsd}; use ng_oxigraph::oxigraph::model::*; use ng_oxigraph::oxigraph::store::Store; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use rand::random; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::env::temp_dir; use std::error::Error; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::fs::{create_dir_all, remove_dir_all, File}; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::io::Write; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::iter::empty; #[cfg(all(target_os = "linux"))] use std::iter::once; -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] use std::path::{Path, PathBuf}; #[cfg(all(target_os = "linux"))] use std::process::Command; @@ -121,7 +121,7 @@ fn test_load_graph() -> Result<(), Box> { } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_bulk_load_graph() -> Result<(), Box> { let store = Store::new()?; store @@ -135,7 +135,7 @@ fn test_bulk_load_graph() -> Result<(), Box> { } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_bulk_load_graph_lenient() -> Result<(), Box> { let store = Store::new()?; store.bulk_loader().on_parse_error(|_| Ok(())).load_from_read( @@ -154,7 +154,7 @@ fn test_bulk_load_graph_lenient() -> Result<(), Box> { } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn 
test_bulk_load_empty() -> Result<(), Box> { let store = Store::new()?; store.bulk_loader().load_quads(empty::())?; @@ -177,7 +177,7 @@ fn test_load_dataset() -> Result<(), Box> { } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_bulk_load_dataset() -> Result<(), Box> { let store = Store::new()?; store @@ -258,7 +258,7 @@ fn test_snapshot_isolation_iterator() -> Result<(), Box> { } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_bulk_load_on_existing_delete_overrides_the_delete() -> Result<(), Box> { let quad = QuadRef::new( NamedNodeRef::new_unchecked("http://example.com/s"), @@ -274,7 +274,7 @@ fn test_bulk_load_on_existing_delete_overrides_the_delete() -> Result<(), Box Result<(), Box> { let dir = TempDir::default(); create_dir_all(&dir.0)?; @@ -304,7 +304,7 @@ fn test_bad_stt_open() -> Result<(), Box> { } // #[test] -// #[cfg(all(not(target_family = "wasm")))] +// #[cfg(all(not(target_family = "wasm"),not(docsrs)))] // fn test_backup() -> Result<(), Box> { // let quad = QuadRef::new( // NamedNodeRef::new_unchecked("http://example.com/s"), @@ -344,7 +344,7 @@ fn test_bad_stt_open() -> Result<(), Box> { // } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_bad_backup() -> Result<(), Box> { let store_dir = TempDir::default(); let backup_dir = TempDir::default(); @@ -355,7 +355,7 @@ fn test_bad_backup() -> Result<(), Box> { } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_backup_on_in_memory() -> Result<(), Box> { let backup_dir = TempDir::default(); Store::new()?.backup(&backup_dir).unwrap_err(); @@ -387,7 +387,7 @@ fn test_backward_compatibility() -> Result<(), Box> { } // #[test] -// #[cfg(all(not(target_family = "wasm")))] +// #[cfg(all(not(target_family = "wasm"),not(docsrs)))] // fn test_secondary() -> 
Result<(), Box> { // let quad = QuadRef::new( // NamedNodeRef::new_unchecked("http://example.com/s"), @@ -430,7 +430,7 @@ fn test_backward_compatibility() -> Result<(), Box> { // } // #[test] -// #[cfg(all(not(target_family = "wasm")))] +// #[cfg(all(not(target_family = "wasm"),not(docsrs)))] // fn test_open_secondary_bad_dir() -> Result<(), Box> { // let primary_dir = TempDir::default(); // create_dir_all(&primary_dir.0)?; @@ -442,7 +442,7 @@ fn test_backward_compatibility() -> Result<(), Box> { // } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_read_only() -> Result<(), Box> { let s = NamedNodeRef::new_unchecked("http://example.com/s"); let p = NamedNodeRef::new_unchecked("http://example.com/p"); @@ -491,7 +491,7 @@ fn test_read_only() -> Result<(), Box> { } #[test] -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] fn test_open_read_only_bad_dir() -> Result<(), Box> { let dir = TempDir::default(); create_dir_all(&dir.0)?; @@ -515,24 +515,24 @@ fn reset_dir(dir: &str) -> Result<(), Box> { Ok(()) } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] struct TempDir(PathBuf); -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] impl Default for TempDir { fn default() -> Self { Self(temp_dir().join(format!("oxigraph-test-{}", random::()))) } } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] impl AsRef for TempDir { fn as_ref(&self) -> &Path { &self.0 } } -#[cfg(all(not(target_family = "wasm")))] +#[cfg(all(not(target_family = "wasm"),not(docsrs)))] impl Drop for TempDir { fn drop(&mut self) { if self.0.is_dir() { diff --git a/ng-storage-rocksdb/Cargo.toml b/ng-storage-rocksdb/Cargo.toml index bf0f274..04ab22b 100644 --- a/ng-storage-rocksdb/Cargo.toml +++ b/ng-storage-rocksdb/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"ng-storage-rocksdb" -version = "0.1.0-preview.5" +version = "0.1.0-preview.6" description = "Stores based on RocksDB for NextGraph" edition.workspace = true license.workspace = true @@ -19,4 +19,4 @@ ng-repo = { path = "../ng-repo", version = "0.1.0-preview.1" } git = "https://git.nextgraph.org/NextGraph/rust-rocksdb.git" branch = "master" features = [ ] -version = "0.21.0-ngpreview.2" +version = "0.21.0-ngpreview.3" diff --git a/ng-storage-rocksdb/README.md b/ng-storage-rocksdb/README.md index 49b3696..cad45b6 100644 --- a/ng-storage-rocksdb/README.md +++ b/ng-storage-rocksdb/README.md @@ -4,7 +4,7 @@ [![Apache 2.0 Licensed][license-image]][license-link] [![MIT Licensed][license-image2]][license-link2] -Stores based on RocksDB for NextGraph +Storage backend based on RocksDB for NextGraph This repository is in active development at [https://git.nextgraph.org/NextGraph/nextgraph-rs](https://git.nextgraph.org/NextGraph/nextgraph-rs), a Gitea instance. For bug reports, issues, merge requests, and in order to join the dev team, please visit the link above and create an account (you can do so with a github account). The [github repo](https://github.com/nextgraph-org/nextgraph-rs) is just a read-only mirror that does not accept issues. diff --git a/ng-verifier/README.md b/ng-verifier/README.md index 2133d01..69225cb 100644 --- a/ng-verifier/README.md +++ b/ng-verifier/README.md @@ -1,10 +1,12 @@ -# Broker library of NextGraph +# Verifier library of NextGraph ![MSRV][rustc-image] [![Apache 2.0 Licensed][license-image]][license-link] [![MIT Licensed][license-image2]][license-link2] -Rust client library of NextGraph +The verifier locally decrypts the incoming commits and builds the materialized state of the documents. + +It serves an API to the Apps, which can read, write and query the materialized state. This repository is in active development at [https://git.nextgraph.org/NextGraph/nextgraph-rs](https://git.nextgraph.org/NextGraph/nextgraph-rs), a Gitea instance.
For bug reports, issues, merge requests, and in order to join the dev team, please visit the link above and create an account (you can do so with a github account). The [github repo](https://github.com/nextgraph-org/nextgraph-rs) is just a read-only mirror that does not accept issues.