ng-oxigraph preview.6

pull/19/head
Niko PLP 7 months ago
parent bfb077edfd
commit 295dfa5cc3
  1. Cargo.lock (8 changed lines)
  2. ng-oxigraph/Cargo.toml (7 changed lines)
  3. ng-oxigraph/build.rs (7 changed lines)
  4. ng-oxigraph/src/oxigraph/storage/backend/mod.rs (8 changed lines)
  5. ng-oxigraph/src/oxigraph/storage/binary_encoder.rs (2 changed lines)
  6. ng-oxigraph/src/oxigraph/storage/mod.rs (74 changed lines)
  7. ng-oxigraph/src/oxigraph/store.rs (28 changed lines)
  8. ng-oxigraph/tests/store.rs (46 changed lines)
  9. ng-storage-rocksdb/Cargo.toml (4 changed lines)
  10. ng-storage-rocksdb/README.md (2 changed lines)
  11. ng-verifier/README.md (6 changed lines)

Cargo.lock (generated, 8 changed lines)

@@ -3385,7 +3385,7 @@ dependencies = [

 [[package]]
 name = "ng-oxigraph"
-version = "0.4.0-alpha.7-ngpreview5"
+version = "0.4.0-alpha.7-ngpreview6"
 dependencies = [
  "codspeed-criterion-compat",
  "digest 0.10.7",
@@ -3446,8 +3446,8 @@ dependencies = [

 [[package]]
 name = "ng-rocksdb"
-version = "0.21.0-ngpreview.2"
-source = "git+https://git.nextgraph.org/NextGraph/rust-rocksdb.git?branch=master#8965930ecd738d70a6cd1d3c9e406d2d9b8e902f"
+version = "0.21.0-ngpreview.3"
+source = "git+https://git.nextgraph.org/NextGraph/rust-rocksdb.git?branch=master#87f6cc1ec0bd265025d8afbee565e129aa0c2273"
 dependencies = [
  "bindgen",
  "bzip2-sys",
@@ -3487,7 +3487,7 @@ dependencies = [

 [[package]]
 name = "ng-storage-rocksdb"
-version = "0.1.0-preview.5"
+version = "0.1.0-preview.6"
 dependencies = [
  "ng-repo",
  "ng-rocksdb",

ng-oxigraph/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "ng-oxigraph"
-version = "0.4.0-alpha.7-ngpreview5"
+version = "0.4.0-alpha.7-ngpreview6"
 authors = ["Tpt <thomas@pellissier-tanon.fr>", "Niko PLP <niko@nextgraph.org>"]
 license = "MIT OR Apache-2.0"
 readme = "README.md"
@@ -14,6 +14,7 @@ a SPARQL database and RDF toolkit. fork for NextGraph
 """
 edition = "2021"
 rust-version = "1.70"
+build = "build.rs"

 [features]
 default = ["rdf-star","sep-0002","sep-0006", "oxsdatatypes"]
@@ -44,9 +45,9 @@ quick-xml = ">=0.29, <0.32"
 memchr = "2.5"
 peg = "0.8"

-[target.'cfg(all(not(target_family = "wasm")))'.dependencies]
+[target.'cfg(all(not(target_family = "wasm"),not(docsrs)))'.dependencies]
 libc = "0.2"
-ng-rocksdb = { version = "0.21.0-ngpreview.2", git = "https://git.nextgraph.org/NextGraph/rust-rocksdb.git", branch = "master", features = [ ] }
+ng-rocksdb = { version = "0.21.0-ngpreview.3", git = "https://git.nextgraph.org/NextGraph/rust-rocksdb.git", branch = "master", features = [ ] }

 [target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies]
 getrandom = "0.2.8"

ng-oxigraph/build.rs

@@ -0,0 +1,7 @@
+fn main() {
+    if std::env::var("DOCS_RS").is_ok() {
+        println!("cargo:rustc-cfg=docsrs");
+    }
+}
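
For context on the new build script above: docs.rs sets the DOCS_RS environment variable when it builds documentation, so the script turns that into a custom `docsrs` cfg flag, and the hunks that follow gate the RocksDB-backed code paths on `not(docsrs)` so documentation can be built without the native ng-rocksdb dependency. A minimal sketch of how such a flag is consumed is shown here; the `backend_name` function is a hypothetical illustration, not part of ng-oxigraph:

    // Sketch only: assumes a build.rs like the one above has emitted
    // `cargo:rustc-cfg=docsrs` because the DOCS_RS environment variable was set.
    #[cfg(not(docsrs))]
    fn backend_name() -> &'static str {
        "rocksdb" // regular builds compile the native backend
    }

    #[cfg(docsrs)]
    fn backend_name() -> &'static str {
        "in-memory fallback" // docs.rs builds skip the native dependency
    }

    fn main() {
        println!("using the {} backend", backend_name());
    }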

ng-oxigraph/src/oxigraph/storage/backend/mod.rs

@@ -1,12 +1,12 @@
 //! A storage backend
 //! RocksDB is available, if not in memory

-#[cfg(any(target_family = "wasm"))]
+#[cfg(any(target_family = "wasm",docsrs))]
 pub use fallback::{ColumnFamily, ColumnFamilyDefinition, Db, Iter, Reader, Transaction};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 pub use oxi_rocksdb::{ColumnFamily, ColumnFamilyDefinition, Db, Iter, Reader, Transaction};

-#[cfg(any(target_family = "wasm"))]
+#[cfg(any(target_family = "wasm",docsrs))]
 mod fallback;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 mod oxi_rocksdb;

ng-oxigraph/src/oxigraph/storage/binary_encoder.rs

@@ -5,7 +5,7 @@ use crate::oxsdatatypes::*;
 use std::io::Read;
 use std::mem::size_of;

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 pub const LATEST_STORAGE_VERSION: u64 = 1;
 pub const WRITTEN_TERM_MAX_SIZE: usize = size_of::<u8>() + 2 * size_of::<StrHash>();

ng-oxigraph/src/oxigraph/storage/mod.rs

@@ -1,9 +1,9 @@
 #![allow(clippy::same_name_method)]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use crate::oxigraph::model::Quad;
 use crate::oxigraph::model::{GraphNameRef, NamedOrBlankNodeRef, QuadRef, TermRef};
 use crate::oxigraph::storage::backend::{Reader, Transaction};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use crate::oxigraph::storage::binary_encoder::LATEST_STORAGE_VERSION;
 use crate::oxigraph::storage::binary_encoder::{
     decode_term, encode_term, encode_term_pair, encode_term_quad, encode_term_triple,
@@ -14,24 +14,24 @@ use crate::oxigraph::storage::binary_encoder::{
 pub use crate::oxigraph::storage::error::{
     CorruptionError, LoaderError, SerializerError, StorageError,
 };
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use crate::oxigraph::storage::numeric_encoder::Decoder;
 use crate::oxigraph::storage::numeric_encoder::{
     insert_term, EncodedQuad, EncodedTerm, StrHash, StrLookup,
 };
 use backend::{ColumnFamily, ColumnFamilyDefinition, Db, Iter};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::collections::VecDeque;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::collections::{HashMap, HashSet};
 use std::error::Error;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::mem::{swap, take};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::path::{Path, PathBuf};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::sync::Mutex;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::{io, thread};

 mod backend;
@@ -51,16 +51,16 @@ const DSPO_CF: &str = "dspo";
 const DPOS_CF: &str = "dpos";
 const DOSP_CF: &str = "dosp";
 const GRAPHS_CF: &str = "graphs";
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 const DEFAULT_CF: &str = "default";
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 const DEFAULT_BULK_LOAD_BATCH_SIZE: usize = 1_000_000;

 /// Low level storage primitives
 #[derive(Clone)]
 pub struct Storage {
     db: Db,
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     default_cf: ColumnFamily,
     id2str_cf: ColumnFamily,
     spog_cf: ColumnFamily,
@@ -80,7 +80,7 @@ impl Storage {
         Self::setup(Db::new(Self::column_families())?)
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn open(path: &Path, key: Option<[u8; 32]>) -> Result<Self, StorageError> {
         Self::setup(Db::open_read_write(
             Some(path),
@@ -89,7 +89,7 @@
         )?)
     }

-    // #[cfg(all(not(target_family = "wasm")))]
+    // #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     // pub fn open_secondary(primary_path: &Path) -> Result<Self, StorageError> {
     //     Self::setup(Db::open_secondary(
     //         primary_path,
@@ -98,7 +98,7 @@
     //     )?)
     // }

-    // #[cfg(all(not(target_family = "wasm")))]
+    // #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     // pub fn open_persistent_secondary(
     //     primary_path: &Path,
     //     secondary_path: &Path,
@@ -110,7 +110,7 @@
     //     )?)
     // }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn open_read_only(path: &Path, key: Option<[u8; 32]>) -> Result<Self, StorageError> {
         Self::setup(Db::open_read_only(path, Self::column_families(), key)?)
     }
@@ -188,7 +188,7 @@
     fn setup(db: Db) -> Result<Self, StorageError> {
         let this = Self {
-            #[cfg(all(not(target_family = "wasm")))]
+            #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
             default_cf: db.column_family(DEFAULT_CF)?,
             id2str_cf: db.column_family(ID2STR_CF)?,
             spog_cf: db.column_family(SPOG_CF)?,
@@ -203,12 +203,12 @@
             graphs_cf: db.column_family(GRAPHS_CF)?,
             db,
         };
-        #[cfg(all(not(target_family = "wasm")))]
+        #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
         this.migrate()?;
         Ok(this)
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     fn migrate(&self) -> Result<(), StorageError> {
         let mut version = self.ensure_version()?;
         if version == 0 {
@@ -248,7 +248,7 @@
         }
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     fn ensure_version(&self) -> Result<u64, StorageError> {
         Ok(
             if let Some(version) = self.db.get(&self.default_cf, b"oxversion")? {
@@ -262,7 +262,7 @@
         )
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     fn update_version(&self, version: u64) -> Result<(), StorageError> {
         self.db
             .insert(&self.default_cf, b"oxversion", &version.to_be_bytes())?;
@@ -289,12 +289,12 @@
         })
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn flush(&self) -> Result<(), StorageError> {
         self.db.flush()
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn compact(&self) -> Result<(), StorageError> {
         self.db.compact(&self.default_cf)?;
         self.db.compact(&self.gspo_cf)?;
@@ -309,7 +309,7 @@
         self.db.compact(&self.id2str_cf)
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn backup(&self, target_directory: &Path) -> Result<(), StorageError> {
         self.db.backup(target_directory)
     }
@@ -634,7 +634,7 @@ impl StorageReader {
         }
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn get_str(&self, key: &StrHash) -> Result<Option<String>, StorageError> {
         Ok(self
             .storage
@@ -645,7 +645,7 @@ impl StorageReader {
             .map_err(CorruptionError::new)?)
     }

-    #[cfg(any(target_family = "wasm"))]
+    #[cfg(any(target_family = "wasm",docsrs))]
     pub fn get_str(&self, key: &StrHash) -> Result<Option<String>, StorageError> {
         Ok(self
             .reader
@@ -655,21 +655,21 @@ impl StorageReader {
             .map_err(CorruptionError::new)?)
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn contains_str(&self, key: &StrHash) -> Result<bool, StorageError> {
         self.storage
             .db
             .contains_key(&self.storage.id2str_cf, &key.to_be_bytes())
     }

-    #[cfg(any(target_family = "wasm"))]
+    #[cfg(any(target_family = "wasm",docsrs))]
     pub fn contains_str(&self, key: &StrHash) -> Result<bool, StorageError> {
         self.reader
             .contains_key(&self.storage.id2str_cf, &key.to_be_bytes())
     }

     /// Validates that all the storage invariants held in the data
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn validate(&self) -> Result<(), StorageError> {
         // triples
         let dspo_size = self.dspo_quads(&[]).count();
@@ -781,7 +781,7 @@ impl StorageReader {
     }

     /// Validates that all the storage invariants held in the data
-    #[cfg(any(target_family = "wasm"))]
+    #[cfg(any(target_family = "wasm",docsrs))]
     #[allow(clippy::unused_self, clippy::unnecessary_wraps)]
     pub fn validate(&self) -> Result<(), StorageError> {
         Ok(()) // TODO
@@ -1005,7 +1005,7 @@ impl<'a> StorageWriter<'a> {
         }
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     fn insert_str(&mut self, key: &StrHash, value: &str) -> Result<(), StorageError> {
         if self
             .storage
@@ -1021,7 +1021,7 @@ impl<'a> StorageWriter<'a> {
         )
     }

-    #[cfg(any(target_family = "wasm"))]
+    #[cfg(any(target_family = "wasm",docsrs))]
     fn insert_str(&mut self, key: &StrHash, value: &str) -> Result<(), StorageError> {
         self.transaction.insert(
             &self.storage.id2str_cf,
@@ -1186,7 +1186,7 @@ impl<'a> StorageWriter<'a> {
     }
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 #[must_use]
 pub struct StorageBulkLoader {
     storage: Storage,
@@ -1195,7 +1195,7 @@ pub struct StorageBulkLoader {
     max_memory_size: Option<usize>,
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 impl StorageBulkLoader {
     pub fn new(storage: Storage) -> Self {
         Self {
@@ -1326,7 +1326,7 @@ impl StorageBulkLoader {
     }
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 struct FileBulkLoader<'a> {
     storage: &'a Storage,
     id2str: HashMap<StrHash, Box<str>>,
@@ -1335,7 +1335,7 @@ struct FileBulkLoader<'a> {
     graphs: HashSet<EncodedTerm>,
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 impl<'a> FileBulkLoader<'a> {
     fn new(storage: &'a Storage, batch_size: usize) -> Self {
         Self {
@@ -1541,7 +1541,7 @@ impl<'a> FileBulkLoader<'a> {
     }
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn map_thread_result<R>(result: thread::Result<R>) -> io::Result<R> {
     result.map_err(|e| {
         io::Error::new(

ng-oxigraph/src/oxigraph/store.rs

@@ -25,7 +25,7 @@
 //! };
 //! # Result::<_, Box<dyn std::error::Error>>::Ok(())
 //! ```
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use super::io::RdfParseError;
 use super::io::{RdfFormat, RdfParser, RdfSerializer};
 use super::model::*;
@@ -34,7 +34,7 @@ use super::sparql::{
     QueryResults, Update, UpdateOptions,
 };
 use super::storage::numeric_encoder::{Decoder, EncodedQuad, EncodedTerm};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use super::storage::StorageBulkLoader;
 use super::storage::{
     ChainedDecodingQuadIterator, DecodingGraphIterator, Storage, StorageReader, StorageWriter,
@@ -42,7 +42,7 @@ use super::storage::{
 pub use super::storage::{CorruptionError, LoaderError, SerializerError, StorageError};
 use std::error::Error;
 use std::io::{Read, Write};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::path::Path;
 use std::{fmt, str};
@@ -100,14 +100,14 @@ impl Store {
     /// Only one read-write [`Store`] can exist at the same time.
     /// If you want to have extra [`Store`] instance opened on a same data
     /// use [`Store::open_read_only`].
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn open(path: impl AsRef<Path>) -> Result<Self, StorageError> {
         Ok(Self {
             storage: Storage::open(path.as_ref(), None)?,
         })
     }

-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn open_with_key(path: impl AsRef<Path>, key: [u8; 32]) -> Result<Self, StorageError> {
         Ok(Self {
             storage: Storage::open(path.as_ref(), Some(key))?,
@@ -124,7 +124,7 @@ impl Store {
     // /// If you prefer persistent storage use [`Store::open_persistent_secondary`].
     // ///
     // /// If you want to simple read-only [`Store`] use [`Store::open_read_only`].
-    // #[cfg(all(not(target_family = "wasm")))]
+    // #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     // pub fn open_secondary(primary_path: impl AsRef<Path>) -> Result<Self, StorageError> {
     //     Ok(Self {
     //         storage: Storage::open_secondary(primary_path.as_ref())?,
@@ -139,7 +139,7 @@ impl Store {
     /// `primary_path` must be the path of the primary instance and `secondary_path` an other directory for the secondary instance cache.
     ///
     /// If you want to simple read-only [`Store`] use [`Store::open_read_only`].
-    // #[cfg(all(not(target_family = "wasm")))]
+    // #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     // pub fn open_persistent_secondary(
     //     primary_path: impl AsRef<Path>,
     //     secondary_path: impl AsRef<Path>,
@@ -155,7 +155,7 @@ impl Store {
     /// Opens a read-only [`Store`] from disk.
     ///
     /// Opening as read-only while having an other process writing the database is undefined behavior.
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn open_read_only(
         path: impl AsRef<Path>,
         key: Option<[u8; 32]>,
@@ -939,7 +939,7 @@ impl Store {
     /// Flushes all buffers and ensures that all writes are saved on disk.
     ///
     /// Flushes are automatically done using background threads but might lag a little bit.
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn flush(&self) -> Result<(), StorageError> {
         self.storage.flush()
     }
@@ -949,7 +949,7 @@
     /// Useful to call after a batch upload or another similar operation.
     ///
     /// <div class="warning">Can take hours on huge databases.</div>
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn optimize(&self) -> Result<(), StorageError> {
         self.storage.compact()
     }
@@ -972,7 +972,7 @@
     /// This allows cheap regular backups.
     ///
     /// If you want to move your data to another RDF storage system, you should have a look at the [`Store::dump_to_write`] function instead.
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn backup(&self, target_directory: impl AsRef<Path>) -> Result<(), StorageError> {
         self.storage.backup(target_directory.as_ref())
     }
@@ -999,7 +999,7 @@
     /// assert!(store.contains(QuadRef::new(ex, ex, ex, ex))?);
     /// # Result::<_, Box<dyn std::error::Error>>::Ok(())
     /// ```
-    #[cfg(all(not(target_family = "wasm")))]
+    #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
     pub fn bulk_loader(&self) -> BulkLoader {
         BulkLoader {
             storage: StorageBulkLoader::new(self.storage.clone()),
@@ -1617,14 +1617,14 @@ impl Iterator for GraphNameIter {
 /// assert!(store.contains(QuadRef::new(ex, ex, ex, ex))?);
 /// # Result::<_, Box<dyn std::error::Error>>::Ok(())
 /// ```
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 #[must_use]
 pub struct BulkLoader {
     storage: StorageBulkLoader,
     on_parse_error: Option<Box<dyn Fn(RdfParseError) -> Result<(), RdfParseError>>>,
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 impl BulkLoader {
     /// Sets the maximal number of threads to be used by the bulk loader per operation.
     ///

ng-oxigraph/tests/store.rs

@@ -5,20 +5,20 @@ use ng_oxigraph::oxigraph::io::RdfFormat;
 use ng_oxigraph::oxigraph::model::vocab::{rdf, xsd};
 use ng_oxigraph::oxigraph::model::*;
 use ng_oxigraph::oxigraph::store::Store;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use rand::random;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::env::temp_dir;
 use std::error::Error;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::fs::{create_dir_all, remove_dir_all, File};
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::io::Write;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::iter::empty;
 #[cfg(all(target_os = "linux"))]
 use std::iter::once;
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 use std::path::{Path, PathBuf};
 #[cfg(all(target_os = "linux"))]
 use std::process::Command;
@@ -121,7 +121,7 @@ fn test_load_graph() -> Result<(), Box<dyn Error>> {
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_bulk_load_graph() -> Result<(), Box<dyn Error>> {
     let store = Store::new()?;
     store
@@ -135,7 +135,7 @@ fn test_bulk_load_graph() -> Result<(), Box<dyn Error>> {
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_bulk_load_graph_lenient() -> Result<(), Box<dyn Error>> {
     let store = Store::new()?;
     store.bulk_loader().on_parse_error(|_| Ok(())).load_from_read(
@@ -154,7 +154,7 @@ fn test_bulk_load_graph_lenient() -> Result<(), Box<dyn Error>> {
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_bulk_load_empty() -> Result<(), Box<dyn Error>> {
     let store = Store::new()?;
     store.bulk_loader().load_quads(empty::<Quad>())?;
@@ -177,7 +177,7 @@ fn test_load_dataset() -> Result<(), Box<dyn Error>> {
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_bulk_load_dataset() -> Result<(), Box<dyn Error>> {
     let store = Store::new()?;
     store
@@ -258,7 +258,7 @@ fn test_snapshot_isolation_iterator() -> Result<(), Box<dyn Error>> {
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_bulk_load_on_existing_delete_overrides_the_delete() -> Result<(), Box<dyn Error>> {
     let quad = QuadRef::new(
         NamedNodeRef::new_unchecked("http://example.com/s"),
@@ -274,7 +274,7 @@ fn test_bulk_load_on_existing_delete_overrides_the_delete() -> Result<(), Box<dy
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_open_bad_dir() -> Result<(), Box<dyn Error>> {
     let dir = TempDir::default();
     create_dir_all(&dir.0)?;
@@ -304,7 +304,7 @@ fn test_bad_stt_open() -> Result<(), Box<dyn Error>> {
 }

 // #[test]
-// #[cfg(all(not(target_family = "wasm")))]
+// #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 // fn test_backup() -> Result<(), Box<dyn Error>> {
 //     let quad = QuadRef::new(
 //         NamedNodeRef::new_unchecked("http://example.com/s"),
@@ -344,7 +344,7 @@ fn test_bad_stt_open() -> Result<(), Box<dyn Error>> {
 // }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_bad_backup() -> Result<(), Box<dyn Error>> {
     let store_dir = TempDir::default();
     let backup_dir = TempDir::default();
@@ -355,7 +355,7 @@ fn test_bad_backup() -> Result<(), Box<dyn Error>> {
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_backup_on_in_memory() -> Result<(), Box<dyn Error>> {
     let backup_dir = TempDir::default();
     Store::new()?.backup(&backup_dir).unwrap_err();
@@ -387,7 +387,7 @@ fn test_backward_compatibility() -> Result<(), Box<dyn Error>> {
 }

 // #[test]
-// #[cfg(all(not(target_family = "wasm")))]
+// #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 // fn test_secondary() -> Result<(), Box<dyn Error>> {
 //     let quad = QuadRef::new(
 //         NamedNodeRef::new_unchecked("http://example.com/s"),
@@ -430,7 +430,7 @@ fn test_backward_compatibility() -> Result<(), Box<dyn Error>> {
 // }

 // #[test]
-// #[cfg(all(not(target_family = "wasm")))]
+// #[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 // fn test_open_secondary_bad_dir() -> Result<(), Box<dyn Error>> {
 //     let primary_dir = TempDir::default();
 //     create_dir_all(&primary_dir.0)?;
@@ -442,7 +442,7 @@ fn test_backward_compatibility() -> Result<(), Box<dyn Error>> {
 // }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_read_only() -> Result<(), Box<dyn Error>> {
     let s = NamedNodeRef::new_unchecked("http://example.com/s");
     let p = NamedNodeRef::new_unchecked("http://example.com/p");
@@ -491,7 +491,7 @@ fn test_read_only() -> Result<(), Box<dyn Error>> {
 }

 #[test]
-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 fn test_open_read_only_bad_dir() -> Result<(), Box<dyn Error>> {
     let dir = TempDir::default();
     create_dir_all(&dir.0)?;
@@ -515,24 +515,24 @@ fn reset_dir(dir: &str) -> Result<(), Box<dyn Error>> {
     Ok(())
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 struct TempDir(PathBuf);

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 impl Default for TempDir {
     fn default() -> Self {
         Self(temp_dir().join(format!("oxigraph-test-{}", random::<u128>())))
     }
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 impl AsRef<Path> for TempDir {
     fn as_ref(&self) -> &Path {
         &self.0
     }
 }

-#[cfg(all(not(target_family = "wasm")))]
+#[cfg(all(not(target_family = "wasm"),not(docsrs)))]
 impl Drop for TempDir {
     fn drop(&mut self) {
         if self.0.is_dir() {

@ -1,6 +1,6 @@
[package] [package]
name = "ng-storage-rocksdb" name = "ng-storage-rocksdb"
version = "0.1.0-preview.5" version = "0.1.0-preview.6"
description = "Stores based on RocksDB for NextGraph" description = "Stores based on RocksDB for NextGraph"
edition.workspace = true edition.workspace = true
license.workspace = true license.workspace = true
@ -19,4 +19,4 @@ ng-repo = { path = "../ng-repo", version = "0.1.0-preview.1" }
git = "https://git.nextgraph.org/NextGraph/rust-rocksdb.git" git = "https://git.nextgraph.org/NextGraph/rust-rocksdb.git"
branch = "master" branch = "master"
features = [ ] features = [ ]
version = "0.21.0-ngpreview.2" version = "0.21.0-ngpreview.3"

ng-storage-rocksdb/README.md

@@ -4,7 +4,7 @@
 [![Apache 2.0 Licensed][license-image]][license-link]
 [![MIT Licensed][license-image2]][license-link2]

-Stores based on RocksDB for NextGraph
+Storage backend based on RocksDB for NextGraph

 This repository is in active development at [https://git.nextgraph.org/NextGraph/nextgraph-rs](https://git.nextgraph.org/NextGraph/nextgraph-rs), a Gitea instance. For bug reports, issues, merge requests, and in order to join the dev team, please visit the link above and create an account (you can do so with a github account). The [github repo](https://github.com/nextgraph-org/nextgraph-rs) is just a read-only mirror that does not accept issues.

ng-verifier/README.md

@@ -1,10 +1,12 @@
-# Broker library of NextGraph
+# Verifier library of NextGraph

 ![MSRV][rustc-image]
 [![Apache 2.0 Licensed][license-image]][license-link]
 [![MIT Licensed][license-image2]][license-link2]

-Rust client library of NextGraph
+The verifier is locally decrypting the incoming commits and building the materialized state of the documents.
+It serves an API to the Apps that can read, write and query the materialized state.

 This repository is in active development at [https://git.nextgraph.org/NextGraph/nextgraph-rs](https://git.nextgraph.org/NextGraph/nextgraph-rs), a Gitea instance. For bug reports, issues, merge requests, and in order to join the dev team, please visit the link above and create an account (you can do so with a github account). The [github repo](https://github.com/nextgraph-org/nextgraph-rs) is just a read-only mirror that does not accept issues.
