Makes RocksDB backend optional but enabled by default

pull/811/head
Tpt 10 months ago committed by Thomas Tanon
parent 0b5790a18f
commit e0087c56b3
  1. 2
      .github/workflows/tests.yml
  2. 2
      fuzz/Cargo.toml
  3. 6
      lib/oxigraph/Cargo.toml
  4. 5
      lib/oxigraph/README.md
  5. 8
      lib/oxigraph/src/storage/backend/mod.rs
  6. 2
      lib/oxigraph/src/storage/binary_encoder.rs
  7. 74
      lib/oxigraph/src/storage/mod.rs
  8. 26
      lib/oxigraph/src/store.rs
  9. 56
      lib/oxigraph/tests/store.rs

@ -52,6 +52,8 @@ jobs:
working-directory: ./lib/spargebra
- run: cargo clippy --all-targets -- -D warnings -D clippy::all
working-directory: ./lib/sparopt
- run: cargo clippy --all-targets --no-default-features -- -D warnings -D clippy::all
working-directory: ./lib/oxigraph
- run: cargo clippy --all-targets -- -D warnings -D clippy::all
working-directory: ./lib/oxigraph
- run: cargo clippy --all-targets -- -D warnings -D clippy::all

@ -10,7 +10,7 @@ cargo-fuzz = true
[dependencies]
anyhow = "1.0.72"
libfuzzer-sys = "0.4"
oxigraph = { path = "../lib/oxigraph" }
oxigraph = { path = "../lib/oxigraph", default-features = false }
oxrdf = { path = "../lib/oxrdf", features = ["rdf-star"] }
oxrdfxml = { path = "../lib/oxrdfxml" }
oxttl = { path = "../lib/oxttl", features = ["rdf-star"] }

@ -16,7 +16,8 @@ edition.workspace = true
rust-version.workspace = true
[features]
default = []
default = ["rocksdb"]
rocksdb = ["oxrocksdb-sys"]
js = ["getrandom/js", "oxsdatatypes/js", "js-sys"]
http-client = ["oxhttp"]
http-client-native-tls = ["http-client", "oxhttp/native-tls"]
@ -48,7 +49,7 @@ thiserror.workspace = true
[target.'cfg(not(target_family = "wasm"))'.dependencies]
libc.workspace = true
oxhttp = { workspace = true, optional = true }
oxrocksdb-sys.workspace = true
oxrocksdb-sys = { workspace = true, optional = true }
[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies]
getrandom.workspace = true
@ -68,3 +69,4 @@ rustdoc-args = ["--cfg", "docsrs"]
[[bench]]
name = "store"
harness = false
required-features = ["rocksdb"]

@ -59,6 +59,11 @@ It is based on these crates that can be used separately:
To build the library locally, don't forget to clone the submodules using `git clone --recursive https://github.com/oxigraph/oxigraph.git` to clone the repository including submodules or `git submodule update --init` to add submodules to the already cloned repository.
It is possible to disable the RocksDB storage backend to only use the in-memory fallback by disabling the `rocksdb` default feature:
```toml
oxigraph = { version = "*", default-features = false }
```
This is the default behavior when compiling Oxigraph to WASM, since RocksDB does not support the WASM target.
## License

@ -1,12 +1,12 @@
//! A storage backend
//! RocksDB is available, if not in memory
#[cfg(target_family = "wasm")]
#[cfg(any(target_family = "wasm", not(feature = "rocksdb")))]
pub use fallback::{ColumnFamily, ColumnFamilyDefinition, Db, Iter, Reader, Transaction};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub use rocksdb::{ColumnFamily, ColumnFamilyDefinition, Db, Iter, Reader, Transaction};
#[cfg(target_family = "wasm")]
#[cfg(any(target_family = "wasm", not(feature = "rocksdb")))]
mod fallback;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
mod rocksdb;

@ -5,7 +5,7 @@ use oxsdatatypes::*;
use std::io::Read;
use std::mem::size_of;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub const LATEST_STORAGE_VERSION: u64 = 1;
pub const WRITTEN_TERM_MAX_SIZE: usize = size_of::<u8>() + 2 * size_of::<StrHash>();

@ -1,9 +1,9 @@
#![allow(clippy::same_name_method)]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use crate::model::Quad;
use crate::model::{GraphNameRef, NamedOrBlankNodeRef, QuadRef, TermRef};
use crate::storage::backend::{Reader, Transaction};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use crate::storage::binary_encoder::LATEST_STORAGE_VERSION;
use crate::storage::binary_encoder::{
decode_term, encode_term, encode_term_pair, encode_term_quad, encode_term_triple,
@ -12,22 +12,22 @@ use crate::storage::binary_encoder::{
WRITTEN_TERM_MAX_SIZE,
};
pub use crate::storage::error::{CorruptionError, LoaderError, SerializerError, StorageError};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use crate::storage::numeric_encoder::Decoder;
use crate::storage::numeric_encoder::{insert_term, EncodedQuad, EncodedTerm, StrHash, StrLookup};
use backend::{ColumnFamily, ColumnFamilyDefinition, Db, Iter};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::collections::VecDeque;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::collections::{HashMap, HashSet};
use std::error::Error;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::mem::{swap, take};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::path::{Path, PathBuf};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::sync::Mutex;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::{io, thread};
mod backend;
@ -47,16 +47,16 @@ const DSPO_CF: &str = "dspo";
const DPOS_CF: &str = "dpos";
const DOSP_CF: &str = "dosp";
const GRAPHS_CF: &str = "graphs";
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
const DEFAULT_CF: &str = "default";
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
const DEFAULT_BULK_LOAD_BATCH_SIZE: usize = 1_000_000;
/// Low level storage primitives
#[derive(Clone)]
pub struct Storage {
db: Db,
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
default_cf: ColumnFamily,
id2str_cf: ColumnFamily,
spog_cf: ColumnFamily,
@ -76,12 +76,12 @@ impl Storage {
Self::setup(Db::new(Self::column_families())?)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open(path: &Path) -> Result<Self, StorageError> {
Self::setup(Db::open_read_write(Some(path), Self::column_families())?)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open_secondary(primary_path: &Path) -> Result<Self, StorageError> {
Self::setup(Db::open_secondary(
primary_path,
@ -90,7 +90,7 @@ impl Storage {
)?)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open_persistent_secondary(
primary_path: &Path,
secondary_path: &Path,
@ -102,7 +102,7 @@ impl Storage {
)?)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open_read_only(path: &Path) -> Result<Self, StorageError> {
Self::setup(Db::open_read_only(path, Self::column_families())?)
}
@ -180,7 +180,7 @@ impl Storage {
fn setup(db: Db) -> Result<Self, StorageError> {
let this = Self {
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
default_cf: db.column_family(DEFAULT_CF)?,
id2str_cf: db.column_family(ID2STR_CF)?,
spog_cf: db.column_family(SPOG_CF)?,
@ -195,12 +195,12 @@ impl Storage {
graphs_cf: db.column_family(GRAPHS_CF)?,
db,
};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
this.migrate()?;
Ok(this)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn migrate(&self) -> Result<(), StorageError> {
let mut version = self.ensure_version()?;
if version == 0 {
@ -240,7 +240,7 @@ impl Storage {
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn ensure_version(&self) -> Result<u64, StorageError> {
Ok(
if let Some(version) = self.db.get(&self.default_cf, b"oxversion")? {
@ -254,7 +254,7 @@ impl Storage {
)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn update_version(&self, version: u64) -> Result<(), StorageError> {
self.db
.insert(&self.default_cf, b"oxversion", &version.to_be_bytes())?;
@ -281,12 +281,12 @@ impl Storage {
})
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn flush(&self) -> Result<(), StorageError> {
self.db.flush()
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn compact(&self) -> Result<(), StorageError> {
self.db.compact(&self.default_cf)?;
self.db.compact(&self.gspo_cf)?;
@ -301,7 +301,7 @@ impl Storage {
self.db.compact(&self.id2str_cf)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn backup(&self, target_directory: &Path) -> Result<(), StorageError> {
self.db.backup(target_directory)
}
@ -626,7 +626,7 @@ impl StorageReader {
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn get_str(&self, key: &StrHash) -> Result<Option<String>, StorageError> {
Ok(self
.storage
@ -637,7 +637,7 @@ impl StorageReader {
.map_err(CorruptionError::new)?)
}
#[cfg(target_family = "wasm")]
#[cfg(any(target_family = "wasm", not(feature = "rocksdb")))]
pub fn get_str(&self, key: &StrHash) -> Result<Option<String>, StorageError> {
Ok(self
.reader
@ -647,21 +647,21 @@ impl StorageReader {
.map_err(CorruptionError::new)?)
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn contains_str(&self, key: &StrHash) -> Result<bool, StorageError> {
self.storage
.db
.contains_key(&self.storage.id2str_cf, &key.to_be_bytes())
}
#[cfg(target_family = "wasm")]
#[cfg(any(target_family = "wasm", not(feature = "rocksdb")))]
pub fn contains_str(&self, key: &StrHash) -> Result<bool, StorageError> {
self.reader
.contains_key(&self.storage.id2str_cf, &key.to_be_bytes())
}
/// Validates that all the storage invariants held in the data
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn validate(&self) -> Result<(), StorageError> {
// triples
let dspo_size = self.dspo_quads(&[]).count();
@ -773,7 +773,7 @@ impl StorageReader {
}
/// Validates that all the storage invariants held in the data
#[cfg(target_family = "wasm")]
#[cfg(any(target_family = "wasm", not(feature = "rocksdb")))]
#[allow(clippy::unused_self, clippy::unnecessary_wraps)]
pub fn validate(&self) -> Result<(), StorageError> {
Ok(()) // TODO
@ -997,7 +997,7 @@ impl<'a> StorageWriter<'a> {
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn insert_str(&mut self, key: &StrHash, value: &str) -> Result<(), StorageError> {
if self
.storage
@ -1013,7 +1013,7 @@ impl<'a> StorageWriter<'a> {
)
}
#[cfg(target_family = "wasm")]
#[cfg(any(target_family = "wasm", not(feature = "rocksdb")))]
fn insert_str(&mut self, key: &StrHash, value: &str) -> Result<(), StorageError> {
self.transaction.insert(
&self.storage.id2str_cf,
@ -1178,7 +1178,7 @@ impl<'a> StorageWriter<'a> {
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
#[must_use]
pub struct StorageBulkLoader {
storage: Storage,
@ -1187,7 +1187,7 @@ pub struct StorageBulkLoader {
max_memory_size: Option<usize>,
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
impl StorageBulkLoader {
pub fn new(storage: Storage) -> Self {
Self {
@ -1318,7 +1318,7 @@ impl StorageBulkLoader {
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
struct FileBulkLoader<'a> {
storage: &'a Storage,
id2str: HashMap<StrHash, Box<str>>,
@ -1327,7 +1327,7 @@ struct FileBulkLoader<'a> {
graphs: HashSet<EncodedTerm>,
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
impl<'a> FileBulkLoader<'a> {
fn new(storage: &'a Storage, batch_size: usize) -> Self {
Self {
@ -1533,7 +1533,7 @@ impl<'a> FileBulkLoader<'a> {
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn map_thread_result<R>(result: thread::Result<R>) -> io::Result<R> {
result.map_err(|e| {
io::Error::new(

@ -25,7 +25,7 @@
//! };
//! # Result::<_, Box<dyn std::error::Error>>::Ok(())
//! ```
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use crate::io::RdfParseError;
use crate::io::{RdfFormat, RdfParser, RdfSerializer};
use crate::model::*;
@ -34,7 +34,7 @@ use crate::sparql::{
QueryResults, Update, UpdateOptions,
};
use crate::storage::numeric_encoder::{Decoder, EncodedQuad, EncodedTerm};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use crate::storage::StorageBulkLoader;
use crate::storage::{
ChainedDecodingQuadIterator, DecodingGraphIterator, Storage, StorageReader, StorageWriter,
@ -42,7 +42,7 @@ use crate::storage::{
pub use crate::storage::{CorruptionError, LoaderError, SerializerError, StorageError};
use std::error::Error;
use std::io::{Read, Write};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::path::Path;
use std::{fmt, str};
@ -100,7 +100,7 @@ impl Store {
/// Only one read-write [`Store`] can exist at the same time.
/// If you want to have extra [`Store`] instance opened on a same data
/// use [`Store::open_secondary`] or [`Store::open_read_only`].
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open(path: impl AsRef<Path>) -> Result<Self, StorageError> {
Ok(Self {
storage: Storage::open(path.as_ref())?,
@ -117,7 +117,7 @@ impl Store {
/// If you prefer persistent storage use [`Store::open_persistent_secondary`].
///
/// If you want to simple read-only [`Store`] use [`Store::open_read_only`].
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open_secondary(primary_path: impl AsRef<Path>) -> Result<Self, StorageError> {
Ok(Self {
storage: Storage::open_secondary(primary_path.as_ref())?,
@ -132,7 +132,7 @@ impl Store {
/// `primary_path` must be the path of the primary instance and `secondary_path` an other directory for the secondary instance cache.
///
/// If you want to simple read-only [`Store`] use [`Store::open_read_only`].
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open_persistent_secondary(
primary_path: impl AsRef<Path>,
secondary_path: impl AsRef<Path>,
@ -149,7 +149,7 @@ impl Store {
///
/// Opening as read-only while having an other process writing the database is undefined behavior.
/// [`Store::open_secondary`] should be used in this case.
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn open_read_only(path: impl AsRef<Path>) -> Result<Self, StorageError> {
Ok(Self {
storage: Storage::open_read_only(path.as_ref())?,
@ -930,7 +930,7 @@ impl Store {
/// Flushes all buffers and ensures that all writes are saved on disk.
///
/// Flushes are automatically done using background threads but might lag a little bit.
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn flush(&self) -> Result<(), StorageError> {
self.storage.flush()
}
@ -940,7 +940,7 @@ impl Store {
/// Useful to call after a batch upload or another similar operation.
///
/// <div class="warning">Can take hours on huge databases.</div>
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn optimize(&self) -> Result<(), StorageError> {
self.storage.compact()
}
@ -963,7 +963,7 @@ impl Store {
/// This allows cheap regular backups.
///
/// If you want to move your data to another RDF storage system, you should have a look at the [`Store::dump_to_write`] function instead.
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn backup(&self, target_directory: impl AsRef<Path>) -> Result<(), StorageError> {
self.storage.backup(target_directory.as_ref())
}
@ -990,7 +990,7 @@ impl Store {
/// assert!(store.contains(QuadRef::new(ex, ex, ex, ex))?);
/// # Result::<_, Box<dyn std::error::Error>>::Ok(())
/// ```
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
pub fn bulk_loader(&self) -> BulkLoader {
BulkLoader {
storage: StorageBulkLoader::new(self.storage.clone()),
@ -1608,14 +1608,14 @@ impl Iterator for GraphNameIter {
/// assert!(store.contains(QuadRef::new(ex, ex, ex, ex))?);
/// # Result::<_, Box<dyn std::error::Error>>::Ok(())
/// ```
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
#[must_use]
pub struct BulkLoader {
storage: StorageBulkLoader,
on_parse_error: Option<Box<dyn Fn(RdfParseError) -> Result<(), RdfParseError>>>,
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
impl BulkLoader {
/// Sets the maximal number of threads to be used by the bulk loader per operation.
///

@ -5,22 +5,22 @@ use oxigraph::io::RdfFormat;
use oxigraph::model::vocab::{rdf, xsd};
use oxigraph::model::*;
use oxigraph::store::Store;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use rand::random;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::env::temp_dir;
use std::error::Error;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::fs::{create_dir_all, remove_dir_all, File};
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::io::Write;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::iter::empty;
#[cfg(target_os = "linux")]
#[cfg(all(target_os = "linux", feature = "rocksdb"))]
use std::iter::once;
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
use std::path::{Path, PathBuf};
#[cfg(target_os = "linux")]
#[cfg(all(target_os = "linux", feature = "rocksdb"))]
use std::process::Command;
#[allow(clippy::non_ascii_literal)]
@ -121,7 +121,7 @@ fn test_load_graph() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_bulk_load_graph() -> Result<(), Box<dyn Error>> {
let store = Store::new()?;
store
@ -135,7 +135,7 @@ fn test_bulk_load_graph() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_bulk_load_graph_lenient() -> Result<(), Box<dyn Error>> {
let store = Store::new()?;
store.bulk_loader().on_parse_error(|_| Ok(())).load_from_read(
@ -154,7 +154,7 @@ fn test_bulk_load_graph_lenient() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_bulk_load_empty() -> Result<(), Box<dyn Error>> {
let store = Store::new()?;
store.bulk_loader().load_quads(empty::<Quad>())?;
@ -177,7 +177,7 @@ fn test_load_dataset() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_bulk_load_dataset() -> Result<(), Box<dyn Error>> {
let store = Store::new()?;
store
@ -258,7 +258,7 @@ fn test_snapshot_isolation_iterator() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_bulk_load_on_existing_delete_overrides_the_delete() -> Result<(), Box<dyn Error>> {
let quad = QuadRef::new(
NamedNodeRef::new_unchecked("http://example.com/s"),
@ -274,7 +274,7 @@ fn test_bulk_load_on_existing_delete_overrides_the_delete() -> Result<(), Box<dy
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_open_bad_dir() -> Result<(), Box<dyn Error>> {
let dir = TempDir::default();
create_dir_all(&dir.0)?;
@ -286,7 +286,7 @@ fn test_open_bad_dir() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(target_os = "linux")]
#[cfg(all(target_os = "linux", feature = "rocksdb"))]
fn test_bad_stt_open() -> Result<(), Box<dyn Error>> {
let dir = TempDir::default();
let store = Store::open(&dir.0)?;
@ -304,7 +304,7 @@ fn test_bad_stt_open() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_backup() -> Result<(), Box<dyn Error>> {
let quad = QuadRef::new(
NamedNodeRef::new_unchecked("http://example.com/s"),
@ -344,7 +344,7 @@ fn test_backup() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_bad_backup() -> Result<(), Box<dyn Error>> {
let store_dir = TempDir::default();
let backup_dir = TempDir::default();
@ -355,7 +355,7 @@ fn test_bad_backup() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_backup_on_in_memory() -> Result<(), Box<dyn Error>> {
let backup_dir = TempDir::default();
Store::new()?.backup(&backup_dir).unwrap_err();
@ -363,7 +363,7 @@ fn test_backup_on_in_memory() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(target_os = "linux")]
#[cfg(all(target_os = "linux", feature = "rocksdb"))]
fn test_backward_compatibility() -> Result<(), Box<dyn Error>> {
// We run twice to check if data is properly saved and closed
for _ in 0..2 {
@ -387,7 +387,7 @@ fn test_backward_compatibility() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_secondary() -> Result<(), Box<dyn Error>> {
let quad = QuadRef::new(
NamedNodeRef::new_unchecked("http://example.com/s"),
@ -430,7 +430,7 @@ fn test_secondary() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_open_secondary_bad_dir() -> Result<(), Box<dyn Error>> {
let primary_dir = TempDir::default();
create_dir_all(&primary_dir.0)?;
@ -442,7 +442,7 @@ fn test_open_secondary_bad_dir() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_read_only() -> Result<(), Box<dyn Error>> {
let s = NamedNodeRef::new_unchecked("http://example.com/s");
let p = NamedNodeRef::new_unchecked("http://example.com/p");
@ -491,7 +491,7 @@ fn test_read_only() -> Result<(), Box<dyn Error>> {
}
#[test]
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
fn test_open_read_only_bad_dir() -> Result<(), Box<dyn Error>> {
let dir = TempDir::default();
create_dir_all(&dir.0)?;
@ -502,7 +502,7 @@ fn test_open_read_only_bad_dir() -> Result<(), Box<dyn Error>> {
Ok(())
}
#[cfg(target_os = "linux")]
#[cfg(all(target_os = "linux", feature = "rocksdb"))]
fn reset_dir(dir: &str) -> Result<(), Box<dyn Error>> {
assert!(Command::new("git")
.args(["clean", "-fX", dir])
@ -515,24 +515,24 @@ fn reset_dir(dir: &str) -> Result<(), Box<dyn Error>> {
Ok(())
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
struct TempDir(PathBuf);
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
impl Default for TempDir {
fn default() -> Self {
Self(temp_dir().join(format!("oxigraph-test-{}", random::<u128>())))
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
impl AsRef<Path> for TempDir {
fn as_ref(&self) -> &Path {
&self.0
}
}
#[cfg(not(target_family = "wasm"))]
#[cfg(all(not(target_family = "wasm"), feature = "rocksdb"))]
impl Drop for TempDir {
fn drop(&mut self) {
if self.0.is_dir() {

Loading…
Cancel
Save