diff --git a/Cargo.toml b/Cargo.toml index c900a1b..301b5f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "signet-libmdbx" description = "Idiomatic and safe MDBX wrapper" -version = "0.5.0" +version = "0.6.0" edition = "2024" rust-version = "1.92" license = "MIT OR Apache-2.0" @@ -40,13 +40,6 @@ smallvec = "1.15.1" thiserror = "2.0.18" tracing = "0.1.44" -dashmap = { version = "6.1.0", features = ["inline"], optional = true } - -[features] -default = [] -return-borrowed = [] -read-tx-timeouts = ["dep:dashmap"] - [dev-dependencies] criterion = "0.8.1" proptest = "1" diff --git a/benches/cursor.rs b/benches/cursor.rs index 70f3a6d..fc8ac96 100644 --- a/benches/cursor.rs +++ b/benches/cursor.rs @@ -2,7 +2,7 @@ mod utils; use criterion::{Criterion, criterion_group, criterion_main}; -use signet_libmdbx::{Cursor, ObjectLength, ReadResult, TransactionKind, ffi::*, tx::TxPtrAccess}; +use signet_libmdbx::{Cursor, ObjectLength, ReadResult, TransactionKind, ffi::*}; use std::{hint::black_box, ptr}; use utils::*; @@ -34,9 +34,7 @@ fn bench_get_seq_iter(c: &mut Criterion) { count += 1; } - fn iterate( - cursor: &mut Cursor, - ) -> ReadResult<()> { + fn iterate(cursor: &mut Cursor) -> ReadResult<()> { let mut i = 0; for result in cursor.iter::() { let (key_len, data_len) = result?; @@ -107,7 +105,7 @@ fn bench_get_seq_for_loop(c: &mut Criterion) { fn bench_get_seq_iter_single_thread(c: &mut Criterion) { let n = 100; let (_dir, env) = setup_bench_db(n); - let mut txn = create_ro_unsync(&env); + let txn = create_ro_unsync(&env); let db = txn.open_db(None).unwrap(); // Note: setup_bench_db creates a named database which adds metadata to the // main database, so actual item count is n + 1 @@ -131,9 +129,7 @@ fn bench_get_seq_iter_single_thread(c: &mut Criterion) { count += 1; } - fn iterate( - cursor: &mut Cursor, - ) -> ReadResult<()> { + fn iterate(cursor: &mut Cursor) -> ReadResult<()> { let mut i = 0; for result in cursor.iter::() { let (key_len, 
data_len) = result?; @@ -154,7 +150,7 @@ fn bench_get_seq_iter_single_thread(c: &mut Criterion) { fn bench_get_seq_cursor_single_thread(c: &mut Criterion) { let n = 100; let (_dir, env) = setup_bench_db(n); - let mut txn = create_ro_unsync(&env); + let txn = create_ro_unsync(&env); let db = txn.open_db(None).unwrap(); // Note: setup_bench_db creates a named database which adds metadata to the // main database, so actual item count is n + 1 @@ -177,7 +173,7 @@ fn bench_get_seq_cursor_single_thread(c: &mut Criterion) { fn bench_get_seq_for_loop_single_thread(c: &mut Criterion) { let n = 100; let (_dir, env) = setup_bench_db(n); - let mut txn = create_ro_unsync(&env); + let txn = create_ro_unsync(&env); let db = txn.open_db(None).unwrap(); // Note: setup_bench_db creates a named database which adds metadata to the // main database, so actual item count is n + 1 @@ -205,9 +201,6 @@ fn bench_get_seq_raw(c: &mut Criterion) { let n = 100; let (_dir, env) = setup_bench_db(n); - let dbi = create_ro_sync(&env).open_db(None).unwrap().dbi(); - let txn = create_ro_sync(&env); - let mut key = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; let mut data = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; let mut cursor: *mut MDBX_cursor = ptr::null_mut(); @@ -216,23 +209,36 @@ fn bench_get_seq_raw(c: &mut Criterion) { // main database, so actual item count is n + 1 let actual_items = n + 1; + let txn = unsafe { + let mut txn: *mut MDBX_txn = ptr::null_mut(); + env.with_raw_env_ptr(|env_ptr| { + txn = create_ro_raw(env_ptr); + }); + txn + }; + + let mut dbi: MDBX_dbi = 0; + unsafe { + match mdbx_dbi_open(txn, ptr::null(), 0, &mut dbi) { + MDBX_SUCCESS | MDBX_RESULT_TRUE => {} + err => panic!("mdbx_dbi_open failed: {}", err), + } + }; + c.bench_function("cursor::traverse::raw", |b| { b.iter(|| unsafe { - txn.txn_execute(|txn| { - mdbx_cursor_open(txn, dbi, &raw mut cursor); - let mut i = 0; - let mut count = 0u32; + mdbx_cursor_open(txn, dbi, &raw mut cursor); + let mut i = 
0; + let mut count = 0u32; - while mdbx_cursor_get(cursor, &raw mut key, &raw mut data, MDBX_NEXT) == 0 { - i += key.iov_len + data.iov_len; - count += 1; - } + while mdbx_cursor_get(cursor, &raw mut key, &raw mut data, MDBX_NEXT) == 0 { + i += key.iov_len + data.iov_len; + count += 1; + } - black_box(i); - assert_eq!(count, actual_items); - mdbx_cursor_close(cursor); - }) - .unwrap(); + black_box(i); + assert_eq!(count, actual_items); + mdbx_cursor_close(cursor); }) }); } diff --git a/benches/db_open.rs b/benches/db_open.rs index f70d8fe..48217e4 100644 --- a/benches/db_open.rs +++ b/benches/db_open.rs @@ -9,18 +9,27 @@ use std::{hint::black_box, ptr}; /// Benchmark mdbx_dbi_flags_ex in isolation (on already-open DBI) fn bench_dbi_flags_ex_only(c: &mut Criterion) { let (_dir, env) = setup_bench_db(10); - let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let dbi = db.dbi(); + + let txn = unsafe { + let mut txn: *mut MDBX_txn = ptr::null_mut(); + env.with_raw_env_ptr(|env_ptr| { + txn = create_ro_raw(env_ptr); + }); + txn + }; + + let mut dbi: MDBX_dbi = 0; + let flags = 0; + match unsafe { mdbx_dbi_open(txn, ptr::null(), flags, &mut dbi) } { + MDBX_SUCCESS | MDBX_RESULT_TRUE => {} + err => panic!("mdbx_dbi_open failed: {}", err), + }; c.bench_function("db_cache::ffi::flags", |b| { b.iter(|| { - txn.txn_execute(|txn_ptr| unsafe { - let mut flags: u32 = 0; - let mut state: u32 = 0; - black_box(mdbx_dbi_flags_ex(txn_ptr.cast_const(), dbi, &mut flags, &mut state)); - }) - .unwrap(); + let mut flags: u32 = 0; + let mut state: u32 = 0; + black_box(unsafe { mdbx_dbi_flags_ex(txn.cast_const(), dbi, &mut flags, &mut state) }); }) }); } @@ -28,15 +37,24 @@ fn bench_dbi_flags_ex_only(c: &mut Criterion) { /// Baseline: just mdbx_dbi_open without flags_ex fn bench_dbi_open_only(c: &mut Criterion) { let (_dir, env) = setup_bench_db(10); - let txn = env.begin_ro_txn().unwrap(); + + let txn = unsafe { + let mut txn: *mut MDBX_txn = ptr::null_mut(); + 
env.with_raw_env_ptr(|env_ptr| { + txn = create_ro_raw(env_ptr); + }); + txn + }; c.bench_function("db_cache::ffi::open", |b| { b.iter(|| { - txn.txn_execute(|txn_ptr| unsafe { - let mut dbi: MDBX_dbi = 0; - black_box(mdbx_dbi_open(txn_ptr, ptr::null(), 0, &mut dbi)); - }) - .unwrap(); + let mut dbi: MDBX_dbi = 0; + let flags = 0; + black_box(match unsafe { mdbx_dbi_open(txn, ptr::null(), flags, &mut dbi) } { + MDBX_SUCCESS => false, + MDBX_RESULT_TRUE => true, + _ => panic!(), + }); }) }); } @@ -44,27 +62,37 @@ fn bench_dbi_open_only(c: &mut Criterion) { /// Full open path: mdbx_dbi_open + mdbx_dbi_flags_ex fn bench_dbi_open_with_flags_ex(c: &mut Criterion) { let (_dir, env) = setup_bench_db(10); - let txn = env.begin_ro_txn().unwrap(); + + let txn = unsafe { + let mut txn: *mut MDBX_txn = ptr::null_mut(); + env.with_raw_env_ptr(|env_ptr| { + txn = create_ro_raw(env_ptr); + }); + txn + }; c.bench_function("db_cache::ffi::open_plus_flags", |b| { b.iter(|| { - txn.txn_execute(|txn_ptr| unsafe { - let mut dbi: MDBX_dbi = 0; - mdbx_dbi_open(txn_ptr, ptr::null(), 0, &mut dbi); - let mut flags: u32 = 0; - let mut state: u32 = 0; - black_box(mdbx_dbi_flags_ex(txn_ptr.cast_const(), dbi, &mut flags, &mut state)); - }) - .unwrap(); - }) + let mut dbi: MDBX_dbi = 0; + let flags = 0; + black_box(match unsafe { mdbx_dbi_open(txn, ptr::null(), flags, &mut dbi) } { + MDBX_SUCCESS => false, + MDBX_RESULT_TRUE => true, + _ => panic!(), + }); + let mut flags: u32 = 0; + let mut state: u32 = 0; + black_box(unsafe { mdbx_dbi_flags_ex(txn.cast_const(), dbi, &mut flags, &mut state) }); + }); }); } /// Benchmark cached DB opens (cache hits after first call) fn bench_open_db_cached(c: &mut Criterion) { let (_dir, env) = setup_bench_db(10); - let txn = env.begin_ro_txn().unwrap(); + // Prime the cache + let txn = env.begin_ro_unsync().unwrap(); let _ = txn.open_db(None).unwrap(); c.bench_function("db_cache::unnamed::hit", |b| { @@ -75,7 +103,7 @@ fn bench_open_db_cached(c: &mut Criterion) 
{ /// Benchmark uncached DB opens (always FFI call) fn bench_open_db_no_cache(c: &mut Criterion) { let (_dir, env) = setup_bench_db(10); - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_unsync().unwrap(); c.bench_function("db_cache::unnamed::disabled", |b| { b.iter(|| black_box(txn.open_db_no_cache(None).unwrap())) @@ -85,7 +113,7 @@ fn bench_open_db_no_cache(c: &mut Criterion) { /// Benchmark cached DB opens (cache hits after first call) fn bench_open_db_cached_named(c: &mut Criterion) { let (_dir, env) = setup_bench_db(10); - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_unsync().unwrap(); // Prime the cache let _ = txn.open_db(Some(NAMED_DB)).unwrap(); @@ -97,7 +125,7 @@ fn bench_open_db_cached_named(c: &mut Criterion) { /// Benchmark uncached DB opens (always FFI call) fn bench_open_db_no_cache_named(c: &mut Criterion) { let (_dir, env) = setup_bench_db(10); - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_unsync().unwrap(); c.bench_function("db_cache::named::disabled", |b| { b.iter(|| black_box(txn.open_db_no_cache(Some(NAMED_DB)).unwrap())) diff --git a/benches/transaction.rs b/benches/transaction.rs index 6996158..6e3f868 100644 --- a/benches/transaction.rs +++ b/benches/transaction.rs @@ -23,21 +23,26 @@ fn bench_get_rand_raw(c: &mut Criterion) { let mut key_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; let mut data_val: MDBX_val = MDBX_val { iov_len: 0, iov_base: ptr::null_mut() }; + let txn: *mut MDBX_txn = unsafe { + let mut txn: *mut MDBX_txn = ptr::null_mut(); + env.with_raw_env_ptr(|env_ptr| { + txn = create_ro_raw(env_ptr); + }); + txn + }; + c.bench_function("transaction::get::rand::raw", |b| { b.iter(|| unsafe { - txn.txn_execute(|txn| { - let mut i = 0; - for key in &keys { - key_val.iov_len = key.len(); - key_val.iov_base = key.as_bytes().as_ptr().cast_mut().cast(); + let mut i = 0; + for key in &keys { + key_val.iov_len = key.len(); + key_val.iov_base = 
key.as_bytes().as_ptr().cast_mut().cast(); - mdbx_get(txn, dbi, &raw const key_val, &raw mut data_val); + mdbx_get(txn, dbi, &raw const key_val, &raw mut data_val); - i += key_val.iov_len; - } - black_box(i); - }) - .unwrap(); + i += key_val.iov_len; + } + black_box(i); }) }); } @@ -65,7 +70,7 @@ fn bench_get_rand_sync(c: &mut Criterion) { fn bench_get_rand_unsync(c: &mut Criterion) { let n = 100u32; let (_dir, env) = setup_bench_db(n); - let mut txn = create_ro_unsync(&env); + let txn = create_ro_unsync(&env); let db = txn.open_db(None).unwrap(); let mut keys: Vec = (0..n).map(get_key).collect(); @@ -84,30 +89,6 @@ fn bench_get_rand_unsync(c: &mut Criterion) { // PUT -fn bench_put_rand_sync(c: &mut Criterion) { - let n = 100u32; - let (_dir, env) = setup_bench_db(0); - - let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect(); - items.shuffle(&mut StdRng::from_seed(Default::default())); - - c.bench_function("transaction::put::rand", |b| { - b.iter_batched( - || { - let txn = create_rw_sync(&env); - let db = txn.open_db(None).unwrap(); - (txn, db) - }, - |(txn, db)| { - for (key, data) in &items { - txn.put(db, key, data, WriteFlags::empty()).unwrap(); - } - }, - criterion::BatchSize::PerIteration, - ) - }); -} - fn bench_put_rand_raw(c: &mut Criterion) { let n = 100u32; let (_dir, env) = setup_bench_db(0); @@ -147,6 +128,30 @@ fn bench_put_rand_raw(c: &mut Criterion) { }); } +fn bench_put_rand_sync(c: &mut Criterion) { + let n = 100u32; + let (_dir, env) = setup_bench_db(0); + + let mut items: Vec<(String, String)> = (0..n).map(|n| (get_key(n), get_data(n))).collect(); + items.shuffle(&mut StdRng::from_seed(Default::default())); + + c.bench_function("transaction::put::rand", |b| { + b.iter_batched( + || { + let txn = create_rw_sync(&env); + let db = txn.open_db(None).unwrap(); + (txn, db) + }, + |(txn, db)| { + for (key, data) in &items { + txn.put(db, key, data, WriteFlags::empty()).unwrap(); + } + }, + 
criterion::BatchSize::PerIteration, + ) + }); +} + fn bench_put_rand_unsync(c: &mut Criterion) { let n = 100u32; let (_dir, env) = setup_bench_db(0); @@ -157,11 +162,11 @@ fn bench_put_rand_unsync(c: &mut Criterion) { c.bench_function("transaction::put::rand::single_thread", |b| { b.iter_batched( || { - let mut txn = create_rw_unsync(&env); + let txn = create_rw_unsync(&env); let db = txn.open_db(None).unwrap(); (txn, db) }, - |(mut txn, db)| { + |(txn, db)| { for (key, data) in &items { txn.put(db, key, data, WriteFlags::empty()).unwrap(); } diff --git a/benches/utils.rs b/benches/utils.rs index 38a7b02..a4da0e6 100644 --- a/benches/utils.rs +++ b/benches/utils.rs @@ -54,12 +54,12 @@ pub unsafe fn create_rw_raw(env: *mut MDBX_env) -> *mut MDBX_txn { /// Create a read-only synchronized transaction. pub fn create_ro_sync(env: &Environment) -> RoTxSync { - env.begin_ro_txn().unwrap() + env.begin_ro_sync().unwrap() } /// Create a read-write synchronized transaction. pub fn create_rw_sync(env: &Environment) -> RwTxSync { - env.begin_rw_txn().unwrap() + env.begin_rw_sync().unwrap() } // Unsync transaction utilities @@ -80,7 +80,7 @@ pub fn setup_bench_db(num_rows: u32) -> (TempDir, Environment) { let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap(); { - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); for i in 0..num_rows { txn.put(db, get_key(i), get_data(i), WriteFlags::empty()).unwrap(); diff --git a/src/lib.rs b/src/lib.rs index 3ba2aad..46ed117 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,7 +10,7 @@ //! - Creating and managing memory-mapped database environments //! - Performing transactional read and write operations //! - Iterating over key-value pairs with cursors -//! - Custom serialization via the [`TableObject`] trait +//! - Custom deserialization via the [`TableObject`] trait //! //! # Quick Start //! @@ -34,13 +34,13 @@ //! 
.open(Path::new("/tmp/my_database"))?; //! //! // Write data in a read-write transaction -//! let txn = env.begin_rw_txn()?; +//! let txn = env.begin_rw_sync()?; //! let db = txn.create_db(None, DatabaseFlags::empty())?; //! txn.put(db, b"hello", b"world", WriteFlags::empty())?; //! txn.commit()?; //! //! // Read data in a read-only transaction -//! let txn = env.begin_ro_txn()?; +//! let txn = env.begin_ro_sync()?; //! let db = txn.open_db(None)?; //! let value: Option> = txn.get(db.dbi(), b"hello").expect("read failed"); //! assert_eq!(value.as_deref(), Some(b"world".as_slice())); @@ -48,6 +48,21 @@ //! Ok(()) //! } //! ``` +//! # Imports +//! +//! For most use cases, import from the crate root: +//! ```rust,ignore +//! use signet_libmdbx::{Environment, DatabaseFlags, WriteFlags, Geometry, MdbxResult}; +//! ``` +//! +//! Transaction and cursor types are returned from `Environment` and transaction +//! methods - you rarely need to import them directly. +//! +//! For advanced usage, import from submodules: +//! - [`tx`] - Transaction type aliases (`RoTxSync`, `RwTxUnsync`, etc.) and +//! cursor type aliases +//! - [`tx::iter`] - Iterator types for cursor iteration +//! - [`sys`] - Environment internals (`EnvironmentKind`, `PageSize`, etc.) //! //! # Key Concepts //! @@ -59,23 +74,17 @@ //! threads. //! - Unsynchronized transactions (`TxUnsync`) offer better //! performance for single-threaded use cases. -//! - [`RO`] and [`RW`] - Marker types indicating read-only (`RO`) or -//! read-write (`RW`) transactions. +//! - [`Ro`] and [`Rw`] - Marker types indicating read-only (`Ro`) or +//! read-write (`Rw`) transactions. +//! - These also exist in sync flavors: [`RoSync`] and [`RwSync`]. //! - [`Database`] - A named or unnamed key-value store within an environment. -//! - Opened with [`TxSync::open_db()`] or [`TxUnsync::open_db()`]. -//! - Created with [`TxSync::create_db()`] or [`TxUnsync::create_db()`]. +//! - Accessed with [`Tx::open_db`]. +//! 
- or created via [`Tx::create_db`]. //! - [`Cursor`]: Enables iteration and positioned access within a database. //! Created via [`TxSync::cursor()`] or [`TxUnsync::cursor()`]. //! -//! # Feature Flags -//! -//! - `return-borrowed`: When enabled, iterators return borrowed data -//! (`Cow::Borrowed`) whenever possible, avoiding allocations. This is faster -//! but the data may change if the transaction modifies it later, which could -//! trigger undefined behavior. When disabled (default), dirty pages in write -//! transactions trigger copies for safety. -//! - `read-tx-timeouts`: Enables automatic timeout handling for read -//! transactions that block writers. Useful for detecting stuck readers. +//! [`Tx::open_db`]: crate::tx::Tx::open_db +//! [`Tx::create_db`]: crate::tx::Tx::create_db //! //! # Custom Zero-copy Deserialization with [`TableObject`] //! @@ -124,23 +133,6 @@ //! [libmdbx]: https://github.com/erthink/libmdbx //! [reth-libmdbx]: https://github.com/paradigmxyz/reth //! [lmdb-rs]: https://github.com/mozilla/lmdb-rs -//! -//! # Imports -//! -//! For most use cases, import from the crate root: -//! ```rust,ignore -//! use signet_libmdbx::{Environment, DatabaseFlags, WriteFlags, Geometry, MdbxResult}; -//! ``` -//! -//! Transaction and cursor types are returned from `Environment` and transaction -//! methods - you rarely need to import them directly. -//! -//! For advanced usage, import from submodules: -//! - [`tx`] - Transaction type aliases (`RoTxSync`, `RwTxUnsync`, etc.) and -//! cursor type aliases -//! - [`tx::iter`] - Iterator types for cursor iteration -//! - [`sys`] - Environment internals (`EnvironmentKind`, `PageSize`, etc.) -//! 
#![warn( missing_copy_implementations, @@ -158,10 +150,6 @@ pub extern crate signet_mdbx_sys as ffi; mod codec; pub use codec::{ObjectLength, TableObject, TableObjectOwned}; - -#[cfg(feature = "read-tx-timeouts")] -pub use crate::sys::read_transactions::MaxReadTransactionDuration; - mod error; pub use error::{MdbxError, MdbxResult, ReadError, ReadResult}; @@ -172,7 +160,9 @@ pub mod sys; pub use sys::{Environment, EnvironmentBuilder, Geometry, Info, Stat}; pub mod tx; -pub use tx::{CommitLatency, Cursor, Database, RO, RW, TransactionKind, TxSync, TxUnsync}; +pub use tx::{ + CommitLatency, Cursor, Database, Ro, RoSync, Rw, RwSync, TransactionKind, TxSync, TxUnsync, +}; #[cfg(test)] mod test { @@ -200,7 +190,7 @@ mod test { for height in 0..1000 { let mut value = [0u8; 8]; LittleEndian::write_u64(&mut value, height); - let tx = env.begin_rw_txn().expect("begin_rw_txn"); + let tx = env.begin_rw_sync().expect("begin_rw_sync"); let index = tx.create_db(None, DatabaseFlags::DUP_SORT).expect("open index db"); tx.put(index, HEIGHT_KEY, value, WriteFlags::empty()).expect("tx.put"); tx.commit().expect("tx.commit"); diff --git a/src/sys/environment.rs b/src/sys/environment.rs index cde19d0..47b185a 100644 --- a/src/sys/environment.rs +++ b/src/sys/environment.rs @@ -1,9 +1,9 @@ use crate::{ - Database, Mode, RO, RW, SyncMode, TransactionKind, TxSync, + Database, Mode, SyncMode, error::{MdbxError, MdbxResult, ReadResult, mdbx_result}, flags::EnvironmentFlags, - sys::txn_manager::{RawTxPtr, TxnManager, TxnManagerMessage}, - tx::unsync::TxUnsync, + sys::txn_manager::{LifecycleHandle, RwSyncLifecycle}, + tx::{RoTxSync, RoTxUnsync, RwTxSync, RwTxUnsync}, }; use byteorder::{ByteOrder, NativeEndian}; use mem::size_of; @@ -14,15 +14,9 @@ use std::{ ops::{Bound, RangeBounds}, path::Path, ptr, - sync::{Arc, mpsc::sync_channel}, - thread::sleep, + sync::Arc, time::Duration, }; -use tracing::warn; - -/// The default maximum duration of a read transaction. 
-#[cfg(feature = "read-tx-timeouts")] -const DEFAULT_MAX_READ_TRANSACTION_DURATION: Duration = Duration::from_secs(5 * 60); /// An environment supports multiple databases, all residing in the same shared-memory map. /// @@ -52,8 +46,6 @@ impl Environment { log_level: None, kind: Default::default(), handle_slow_readers: None, - #[cfg(feature = "read-tx-timeouts")] - max_read_transaction_duration: None, } } @@ -83,47 +75,21 @@ impl Environment { /// Returns the transaction manager. #[inline] - pub(crate) fn txn_manager(&self) -> &TxnManager { + pub(crate) fn txn_manager(&self) -> &LifecycleHandle { &self.inner.txn_manager } - /// Returns the number of timed out transactions that were not aborted by the user yet. - #[cfg(feature = "read-tx-timeouts")] - pub fn timed_out_not_aborted_transactions(&self) -> usize { - self.inner.txn_manager.timed_out_not_aborted_read_transactions().unwrap_or(0) - } - /// Create a read-only transaction for use with the environment. #[inline] - pub fn begin_ro_txn(&self) -> MdbxResult> { - TxSync::::new(self.clone()) + pub fn begin_ro_sync(&self) -> MdbxResult { + RoTxSync::begin(self.clone()) } /// Create a read-write transaction for use with the environment. This /// method will block while there are any other read-write transactions /// open on the environment. 
- pub fn begin_rw_txn(&self) -> MdbxResult> { - let mut warned = false; - let txn = loop { - let (tx, rx) = sync_channel(0); - self.txn_manager().send_message(TxnManagerMessage::Begin { - parent: RawTxPtr(ptr::null_mut()), - flags: RW::OPEN_FLAGS, - sender: tx, - }); - let res = rx.recv().unwrap(); - if matches!(&res, Err(MdbxError::Busy)) { - if !warned { - warned = true; - warn!(target: "libmdbx", "Process stalled, awaiting read-write transaction lock."); - } - sleep(Duration::from_millis(250)); - continue; - } - - break res; - }?; - Ok(TxSync::new_from_ptr(self.clone(), txn.0)) + pub fn begin_rw_sync(&self) -> MdbxResult { + RwTxSync::begin(self.clone()) } /// Create a single-threaded read-only transaction for use with the @@ -135,8 +101,8 @@ impl Environment { /// /// If the `read-tx-timeouts` feature is enabled, the transaction will /// have a default timeout applied. - pub fn begin_ro_unsync(&self) -> MdbxResult> { - TxUnsync::::new(self.clone()) + pub fn begin_ro_unsync(&self) -> MdbxResult { + RoTxUnsync::begin(self.clone()) } /// Create a single-threaded read-write transaction for use with the @@ -146,36 +112,8 @@ impl Environment { /// The returned Tx is `!Send` and `!Sync`. As a result, it saves about 30% /// overhead on transaction operations compared to the multi-threaded /// version, but cannot be sent or shared between threads. - pub fn begin_rw_unsync(&self) -> MdbxResult> { - TxUnsync::::new(self.clone()) - } - - /// Create a single-threaded read-only transaction without a timeout for use - /// with the environment. - /// - /// The returned Tx is `!Sync`. As a result, it saves about 30% overhead on - /// transaction operations compared to the multi-threaded version, but - /// cannot be sent or shared between threads. - /// - /// Instantiating a read-only transaction without a timeout is not - /// recommended, as it may lead to resource exhaustion if done excessively. 
- #[cfg(feature = "read-tx-timeouts")] - pub fn begin_ro_single_thread_no_timeout(&self) -> MdbxResult> { - TxUnsync::::new_no_timeout(self.clone()) - } - - /// Create a single-threaded read-only transaction with a custom timeout - /// for use with the environment. - /// - /// The returned Tx is `!Sync`. As a result, it saves about 30% overhead on - /// transaction operations compared to the multi-threaded version, but - /// cannot be sent or shared between threads. - #[cfg(feature = "read-tx-timeouts")] - pub fn begin_ro_single_thread_with_timeout( - &self, - duration: Duration, - ) -> MdbxResult> { - TxUnsync::::new_with_timeout(self.clone(), duration) + pub fn begin_rw_unsync(&self) -> MdbxResult { + RwTxUnsync::begin(self.clone()) } /// Returns a raw pointer to the underlying MDBX environment. @@ -261,7 +199,7 @@ impl Environment { /// * It will create a read transaction to traverse the freelist database. pub fn freelist(&self) -> ReadResult { let mut freelist: usize = 0; - let txn = self.begin_ro_txn()?; + let txn = self.begin_ro_unsync()?; let db = Database::freelist_db(); let mut cursor = txn.cursor(db)?; let mut iter = cursor.iter_slices(); @@ -293,7 +231,7 @@ struct EnvironmentInner { /// Whether the environment was opened as WRITEMAP. env_kind: EnvironmentKind, /// Transaction manager - txn_manager: TxnManager, + txn_manager: LifecycleHandle, } impl Drop for EnvironmentInner { @@ -676,11 +614,6 @@ pub struct EnvironmentBuilder { log_level: Option, kind: EnvironmentKind, handle_slow_readers: Option, - #[cfg(feature = "read-tx-timeouts")] - /// The maximum duration of a read transaction. If [None], but the - /// `read-tx-timeout` feature is enabled, the default value of - /// [`DEFAULT_MAX_READ_TRANSACTION_DURATION`] is used. 
- max_read_transaction_duration: Option, } impl EnvironmentBuilder { @@ -812,22 +745,7 @@ impl EnvironmentBuilder { let env_ptr = EnvPtr(env); - #[cfg(not(feature = "read-tx-timeouts"))] - let txn_manager = TxnManager::new(env_ptr); - - #[cfg(feature = "read-tx-timeouts")] - let txn_manager = { - if let crate::MaxReadTransactionDuration::Set(duration) = self - .max_read_transaction_duration - .unwrap_or(read_transactions::MaxReadTransactionDuration::Set( - DEFAULT_MAX_READ_TRANSACTION_DURATION, - )) - { - TxnManager::new_with_max_read_transaction_duration(env_ptr, duration) - } else { - TxnManager::new(env_ptr) - } - }; + let txn_manager = RwSyncLifecycle::spawn(env_ptr); let env = EnvironmentInner { env, txn_manager, env_kind: self.kind }; @@ -857,9 +775,10 @@ impl EnvironmentBuilder { /// /// This defines the number of slots in the lock table that is used to /// track readers in the environment. The default is 126. Starting a - /// read-only transaction normally ties a lock table slot to the [`TxSync`] - /// or [`TxUnsync`] object until it or the [Environment] object is - /// destroyed. + /// read-only transaction normally ties a lock table slot to a [`Tx`] object + /// until it or the [`Environment`] object is destroyed. + /// + /// [`Tx`]: crate::tx::Tx pub const fn set_max_readers(&mut self, max_readers: u64) -> &mut Self { self.max_readers = Some(max_readers); self @@ -873,7 +792,10 @@ impl EnvironmentBuilder { /// /// Currently a moderate number of slots are cheap but a huge number gets /// expensive: 7-120 words per transaction, and every call to - /// [`TxSync::open_db()`] or [`TxSync::open_db_no_cache()`] does a linear + /// [`Tx::open_db()`] or [`Tx::open_db_no_cache()`] does a linear /// search of the opened slots. + /// + /// [`Tx::open_db()`]: crate::tx::Tx::open_db + /// [`Tx::open_db_no_cache()`]: crate::tx::Tx::open_db_no_cache 
pub const fn set_max_dbs(&mut self, v: usize) -> &mut Self { self.max_dbs = Some(v as u64); @@ -964,44 +886,6 @@ impl EnvironmentBuilder { } } -#[cfg(feature = "read-tx-timeouts")] -pub(crate) mod read_transactions { - use crate::EnvironmentBuilder; - use std::time::Duration; - - /// The maximum duration of a read transaction. - #[derive(Debug, Clone, Copy)] - #[cfg(feature = "read-tx-timeouts")] - pub enum MaxReadTransactionDuration { - /// The maximum duration of a read transaction is unbounded. - Unbounded, - /// The maximum duration of a read transaction is set to the given duration. - Set(Duration), - } - - #[cfg(feature = "read-tx-timeouts")] - impl MaxReadTransactionDuration { - /// Returns the duration if set, otherwise None for unbounded. - pub const fn as_duration(&self) -> Option { - match self { - Self::Unbounded => None, - Self::Set(duration) => Some(*duration), - } - } - } - - impl EnvironmentBuilder { - /// Set the maximum time a read-only transaction can be open. - pub const fn set_max_read_transaction_duration( - &mut self, - max_read_transaction_duration: MaxReadTransactionDuration, - ) -> &mut Self { - self.max_read_transaction_duration = Some(max_read_transaction_duration); - self - } - } -} - /// Converts a [`HandleSlowReadersCallback`] to the actual FFI function pointer. 
fn convert_hsr_fn(callback: Option) -> ffi::MDBX_hsr_func { unsafe { std::mem::transmute(callback) } @@ -1049,7 +933,7 @@ mod tests { // Insert some data in the database, so the read transaction can lock on the snapshot of it { - let tx = env.begin_rw_txn().unwrap(); + let tx = env.begin_rw_sync().unwrap(); let db = tx.open_db(None).unwrap(); for i in 0usize..1_000 { tx.put(db, i.to_le_bytes(), b"0", WriteFlags::empty()).unwrap() @@ -1058,11 +942,10 @@ mod tests { } // Create a read transaction - let _tx_ro = env.begin_ro_txn().unwrap(); - + let _tx_ro = env.begin_ro_sync().unwrap(); // Change previously inserted data, so the read transaction would use the previous snapshot { - let tx = env.begin_rw_txn().unwrap(); + let tx = env.begin_rw_sync().unwrap(); let db = tx.open_db(None).unwrap(); for i in 0usize..1_000 { tx.put(db, i.to_le_bytes(), b"1", WriteFlags::empty()).unwrap(); @@ -1073,7 +956,7 @@ mod tests { // Insert more data in the database, so we hit the DB size limit error, and MDBX tries to // kick long-lived readers and delete their snapshots { - let tx = env.begin_rw_txn().unwrap(); + let tx = env.begin_rw_sync().unwrap(); let db = tx.open_db(None).unwrap(); for i in 1_000usize..1_000_000 { match tx.put(db, i.to_le_bytes(), b"0", WriteFlags::empty()) { diff --git a/src/sys/mod.rs b/src/sys/mod.rs index ee03704..fde5df2 100644 --- a/src/sys/mod.rs +++ b/src/sys/mod.rs @@ -13,8 +13,6 @@ mod environment; pub(crate) use environment::EnvPtr; -#[cfg(feature = "read-tx-timeouts")] -pub(crate) use environment::read_transactions; pub use environment::{ Environment, EnvironmentBuilder, EnvironmentKind, Geometry, HandleSlowReadersCallback, HandleSlowReadersReturnCode, Info, PageSize, Stat, diff --git a/src/sys/txn_manager.rs b/src/sys/txn_manager.rs index 15fe7dc..9da4533 100644 --- a/src/sys/txn_manager.rs +++ b/src/sys/txn_manager.rs @@ -1,5 +1,4 @@ use crate::{ - CommitLatency, error::{MdbxResult, mdbx_result}, sys::EnvPtr, }; @@ -14,408 +13,151 @@ pub(crate) 
struct RawTxPtr(pub(crate) *mut ffi::MDBX_txn); unsafe impl Send for RawTxPtr {} unsafe impl Sync for RawTxPtr {} -pub(crate) enum TxnManagerMessage { - Begin { - parent: RawTxPtr, - flags: ffi::MDBX_txn_flags_t, - sender: SyncSender>, - }, - Abort { - tx: RawTxPtr, - sender: SyncSender>, - }, - Commit { - tx: RawTxPtr, - sender: SyncSender>, - }, +#[derive(Debug, Clone, Copy)] +pub(crate) struct CommitLatencyPtr(pub(crate) *mut ffi::MDBX_commit_latency); + +unsafe impl Send for CommitLatencyPtr {} +unsafe impl Sync for CommitLatencyPtr {} + +/// Begin transaction request +pub(crate) struct Begin { + pub(crate) parent: RawTxPtr, + pub(crate) flags: ffi::MDBX_txn_flags_t, + pub(crate) sender: SyncSender>, + pub(crate) span: tracing::Span, } -/// Manages transactions by doing two things: -/// - Opening, aborting, and committing transactions using [`TxnManager::send_message`] with the -/// corresponding [`TxnManagerMessage`] -/// - Aborting long-lived read transactions (if the `read-tx-timeouts` feature is enabled and -/// `TxnManager::with_max_read_transaction_duration` is called) -#[derive(Debug)] -pub(crate) struct TxnManager { - sender: SyncSender, - #[cfg(feature = "read-tx-timeouts")] - read_transactions: Option>, +/// Abort transaction request +pub(crate) struct Abort { + pub(crate) tx: RawTxPtr, + pub(crate) sender: SyncSender>, + pub(crate) span: tracing::Span, } -impl TxnManager { - pub(crate) fn new(env: EnvPtr) -> Self { - let (tx, rx) = sync_channel(0); - let txn_manager = Self { - sender: tx, - #[cfg(feature = "read-tx-timeouts")] - read_transactions: None, - }; +/// Commit transaction request +pub(crate) struct Commit { + pub(crate) tx: RawTxPtr, + pub(crate) latency: CommitLatencyPtr, + pub(crate) sender: SyncSender>, + pub(crate) span: tracing::Span, +} - txn_manager.start_message_listener(env, rx); +/// Messages sent to the [`TxnManager`]. 
+pub(crate) enum LifecycleEvent { + Begin(Begin), + Abort(Abort), + Commit(Commit), +} - txn_manager +impl From for LifecycleEvent { + fn from(begin: Begin) -> Self { + LifecycleEvent::Begin(begin) } +} - /// Spawns a new [`std::thread`] that listens to incoming [`TxnManagerMessage`] messages, - /// executes an FFI function, and returns the result on the provided channel. - /// - /// - [`TxnManagerMessage::Begin`] opens a new transaction with [`ffi::mdbx_txn_begin_ex`] - /// - [`TxnManagerMessage::Abort`] aborts a transaction with [`ffi::mdbx_txn_abort`] - /// - [`TxnManagerMessage::Commit`] commits a transaction with [`ffi::mdbx_txn_commit_ex`] - fn start_message_listener(&self, env: EnvPtr, rx: Receiver) { - let task = move || { - let env = env; - loop { - match rx.recv() { - Ok(msg) => match msg { - TxnManagerMessage::Begin { parent, flags, sender } => { - let _span = - tracing::debug_span!(target: "libmdbx::txn", "begin", flags) - .entered(); - let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); - let res = mdbx_result(unsafe { - ffi::mdbx_txn_begin_ex( - env.0, - parent.0, - flags, - &mut txn, - ptr::null_mut(), - ) - }) - .map(|_| RawTxPtr(txn)); - sender.send(res).unwrap(); - } - TxnManagerMessage::Abort { tx, sender } => { - let _span = - tracing::debug_span!(target: "libmdbx::txn", "abort").entered(); - sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap(); - } - TxnManagerMessage::Commit { tx, sender } => { - let _span = - tracing::debug_span!(target: "libmdbx::txn", "commit").entered(); - sender - .send({ - let mut latency = CommitLatency::new(); - mdbx_result(unsafe { - ffi::mdbx_txn_commit_ex(tx.0, latency.mdb_commit_latency()) - }) - .map(|v| (v, latency)) - }) - .unwrap(); - } - }, - Err(_) => return, - } - } - }; - std::thread::Builder::new().name("mdbx-rs-txn-manager".to_string()).spawn(task).unwrap(); +impl From for LifecycleEvent { + fn from(abort: Abort) -> Self { + LifecycleEvent::Abort(abort) } +} - pub(crate) fn 
send_message(&self, message: TxnManagerMessage) { - self.sender.send(message).unwrap() +impl From for LifecycleEvent { + fn from(commit: Commit) -> Self { + LifecycleEvent::Commit(commit) } } -#[cfg(feature = "read-tx-timeouts")] -mod read_transactions { - use crate::{ - RO, - error::mdbx_result, - sys::{environment::EnvPtr, txn_manager::TxnManager}, - tx::PtrSync, - }; - use dashmap::{DashMap, DashSet}; - use std::{ - backtrace::Backtrace, - sync::{Arc, mpsc::sync_channel}, - time::{Duration, Instant}, - }; - use tracing::{error, trace, warn}; - - const READ_TRANSACTIONS_CHECK_INTERVAL: Duration = Duration::from_secs(5); - - impl TxnManager { - /// Returns a new instance for which the maximum duration that a read transaction can be - /// open is set. - pub(crate) fn new_with_max_read_transaction_duration( - env: EnvPtr, - duration: Duration, - ) -> Self { - let read_transactions = Arc::new(ReadTransactions::new(duration)); - read_transactions.clone().start_monitor(); - - let (tx, rx) = sync_channel(0); - - let txn_manager = Self { sender: tx, read_transactions: Some(read_transactions) }; - - txn_manager.start_message_listener(env, rx); - - txn_manager - } - - /// Adds a new transaction to the list of active read transactions. - pub(crate) fn add_active_read_transaction(&self, ptr: *mut ffi::MDBX_txn, tx: PtrSync) { - if let Some(read_transactions) = &self.read_transactions { - read_transactions.add_active(ptr, tx); - } - } - - /// Removes a transaction from the list of active read transactions. - /// - /// Returns `true` if the transaction was found and removed. - pub(crate) fn remove_active_read_transaction(&self, ptr: *mut ffi::MDBX_txn) -> bool { - self.read_transactions.as_ref().is_some_and(|txs| txs.remove_active(ptr)) - } +/// Handle to communicate with the transaction manager. +pub(crate) struct LifecycleHandle { + sender: SyncSender, +} - /// Returns the number of timed out transactions that were not aborted by the user yet. 
- pub(crate) fn timed_out_not_aborted_read_transactions(&self) -> Option { - self.read_transactions - .as_ref() - .map(|read_transactions| read_transactions.timed_out_not_aborted()) - } +impl LifecycleHandle { + /// Sends a message to the transaction manager. + #[track_caller] + #[inline(always)] + pub(crate) fn send>(&self, msg: T) { + self.sender.send(msg.into()).unwrap(); } +} - // A pointer, exactly when it expires, and optionally a backtrace of where - // it was created - pub(super) type ActiveReadTransactionEntry = (PtrSync, Instant, Option>); - - #[derive(Debug, Default)] - pub(super) struct ReadTransactions { - /// Maximum duration that a read transaction can be open until the - /// [`ReadTransactions::start_monitor`] aborts it. - max_duration: Duration, - /// List of currently active read transactions. - /// - /// We store `usize` instead of a raw pointer as a key, because - /// pointers are not comparable. The time of transaction opening is - /// stored as a value. - /// - /// The backtrace of the transaction opening is recorded only when - /// debug assertions are enabled. - active: DashMap, - /// List of timed out transactions that were not aborted by the user - /// yet, hence have a dangling read transaction pointer. - timed_out_not_aborted: DashSet, +impl From> for LifecycleHandle { + fn from(sender: SyncSender) -> Self { + Self { sender } } +} - impl ReadTransactions { - pub(super) fn new(max_duration: Duration) -> Self { - Self { max_duration, ..Default::default() } - } - - /// Adds a new transaction to the list of active read transactions. - pub(super) fn add_active(&self, ptr: *mut ffi::MDBX_txn, tx: PtrSync) { - let _ = self.active.insert( - ptr as usize, - ( - tx, - Instant::now(), - cfg!(debug_assertions).then(|| Arc::new(Backtrace::force_capture())), - ), - ); - } - - /// Removes a transaction from the list of active read transactions. 
- pub(super) fn remove_active(&self, ptr: *mut ffi::MDBX_txn) -> bool { - self.timed_out_not_aborted.remove(&(ptr as usize)); - self.active.remove(&(ptr as usize)).is_some() - } +/// Manages RW transactions in a background thread. +/// +/// MDBX requires that RW transactions are committed and aborted +/// from the same thread that created them. This struct spawns a +/// background thread to handle these operations for Sync RW transactions. +#[derive(Debug)] +pub(crate) struct RwSyncLifecycle { + env: EnvPtr, + rx: Receiver, +} - /// Returns the number of timed out transactions that were not aborted by the user yet. - pub(super) fn timed_out_not_aborted(&self) -> usize { - self.timed_out_not_aborted.len() - } +impl RwSyncLifecycle { + /// Creates a new [`TxnManager`], spawns a background task, returns + /// a sender to communicate with it. + pub(crate) fn spawn(env: EnvPtr) -> LifecycleHandle { + let (tx, rx) = sync_channel(0); + let txn_manager = Self { env, rx }; - /// Spawns a new [`std::thread`] that monitors the list of active read transactions and - /// timeouts those that are open for longer than `ReadTransactions.max_duration`. - pub(super) fn start_monitor(self: Arc) { - let task = move || { - let mut timed_out_active = Vec::new(); + txn_manager.start_message_listener(); - loop { - let now = Instant::now(); - let mut max_active_transaction_duration = None; + tx.into() + } - // Iterate through active read transactions and time out those that's open for - // longer than `self.max_duration`. - for entry in &self.active { - let (tx, start, backtrace) = entry.value(); - let duration = now - *start; + /// Begin a RW transaction. 
+ fn handle_begin(&self, Begin { parent, flags, sender, span }: Begin) { + let _guard = span.entered(); + let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); + let res = mdbx_result(unsafe { + ffi::mdbx_txn_begin_ex(self.env.0, parent.0, flags, &mut txn, ptr::null_mut()) + }) + .map(|_| RawTxPtr(txn)); + sender.send(res).unwrap(); + } - if duration > self.max_duration { - let mut lock = tx.lock(); - // SAFETY: We have exclusive access to the - // transaction, as we hold the lock. - let txn_ptr = unsafe { tx.txn_ptr() }; - let result = mdbx_result(unsafe { ffi::mdbx_txn_reset(txn_ptr) }); + // Abort a transaction. + fn handle_abort(&self, Abort { tx, sender, span }: Abort) { + let _guard = span.entered(); + sender.send(mdbx_result(unsafe { ffi::mdbx_txn_abort(tx.0) })).unwrap(); + } - if result.is_ok() { - // Set timed out - *lock = true; - } + /// Commit a transaction. + fn handle_commit(&self, Commit { tx, sender, latency, span }: Commit) { + let _guard = span.entered(); + sender.send(mdbx_result(unsafe { ffi::mdbx_txn_commit_ex(tx.0, latency.0) })).unwrap(); + } - // Add the transaction to `timed_out_active`. We - // can't remove it instantly from the list of - // active transactions, because we iterate through - // it. - timed_out_active.push((txn_ptr, duration, backtrace.clone(), result)); - } else { - max_active_transaction_duration = Some( - duration.max(max_active_transaction_duration.unwrap_or_default()), - ); + /// Spawns a new [`std::thread`] that listens to incoming [`RwSyncLifecycle::Message`] messages, + /// executes an FFI function, and returns the result on the provided channel. 
+ /// + /// - [`RwSyncLifecycle::Message::Begin`] opens a new transaction with [`ffi::mdbx_txn_begin_ex`] + /// - [`RwSyncLifecycle::Message::Abort`] aborts a transaction with [`ffi::mdbx_txn_abort`] + /// - [`RwSyncLifecycle::Message::Commit`] commits a transaction with [`ffi::mdbx_txn_commit_ex`] + fn start_message_listener(self) { + let task = move || { + loop { + match self.rx.recv() { + Ok(msg) => match msg { + LifecycleEvent::Begin(begin) => { + self.handle_begin(begin); } - } - - // Walk through timed out transactions, and delete them from the list of active - // transactions. - for (ptr, open_duration, backtrace, err) in timed_out_active.iter().cloned() { - // Try deleting the transaction from the list of active transactions. - let was_in_active = self.remove_active(ptr); - if let Err(err) = err { - if was_in_active { - // If the transaction was in the list of active transactions, - // then user didn't abort it and we failed to do so. - error!(target: "libmdbx", %err, ?open_duration, ?backtrace, "Failed to time out the long-lived read transaction"); - } - } else { - // Happy path, the transaction has been timed out by us with no errors. - warn!(target: "libmdbx", ?open_duration, ?backtrace, "Long-lived read transaction has been timed out"); - // Add transaction to the list of timed out transactions that were not - // aborted by the user yet. - self.timed_out_not_aborted.insert(ptr as usize); + LifecycleEvent::Abort(abort) => { + self.handle_abort(abort); } - } - - // Clear the list of timed out transactions, but not de-allocate the reserved - // capacity to save on further pushes. 
- timed_out_active.clear(); - - if !self.active.is_empty() { - trace!( - target: "libmdbx", - elapsed = ?now.elapsed(), - active = ?self.active.iter().map(|entry| { - let (tx, start, _) = entry.value(); - (tx.clone(), start.elapsed()) - }).collect::>(), - "Read transactions" - ); - } - - // Sleep not more than `READ_TRANSACTIONS_CHECK_INTERVAL`, but at least until - // the closest deadline of an active read transaction - let sleep_duration = READ_TRANSACTIONS_CHECK_INTERVAL.min( - self.max_duration - max_active_transaction_duration.unwrap_or_default(), - ); - trace!(target: "libmdbx", ?sleep_duration, elapsed = ?now.elapsed(), "Putting transaction monitor to sleep"); - std::thread::sleep(sleep_duration); + LifecycleEvent::Commit(commit) => { + self.handle_commit(commit); + } + }, + Err(_) => return, } - }; - std::thread::Builder::new() - .name("mdbx-rs-read-tx-timeouts".to_string()) - .spawn(task) - .unwrap(); - } - } - - #[cfg(test)] - mod tests { - use crate::{ - Environment, MaxReadTransactionDuration, MdbxError, - sys::txn_manager::read_transactions::READ_TRANSACTIONS_CHECK_INTERVAL, - }; - use std::{thread::sleep, time::Duration}; - use tempfile::tempdir; - - #[test] - fn txn_manager_read_transactions_duration_set() { - const MAX_DURATION: Duration = Duration::from_secs(1); - - let dir = tempdir().unwrap(); - let env = Environment::builder() - .set_max_read_transaction_duration(MaxReadTransactionDuration::Set(MAX_DURATION)) - .open(dir.path()) - .unwrap(); - - let read_transactions = env.txn_manager().read_transactions.as_ref().unwrap(); - - // Create a read-only transaction, successfully use it, close it by dropping. 
- { - let tx = env.begin_ro_txn().unwrap(); - - let tx_ptr = tx.txn_execute(|ptr| ptr as usize).unwrap(); - assert!(read_transactions.active.contains_key(&tx_ptr)); - - tx.open_db(None).unwrap(); - drop(tx); - - assert!(!read_transactions.active.contains_key(&tx_ptr)); } - - // Create a read-only transaction, successfully use it, close it by committing. - { - let tx = env.begin_ro_txn().unwrap(); - let tx_ptr = tx.txn_execute(|ptr| ptr as usize).unwrap(); - assert!(read_transactions.active.contains_key(&tx_ptr)); - - tx.open_db(None).unwrap(); - tx.commit().unwrap(); - - assert!(!read_transactions.active.contains_key(&tx_ptr)); - } - - { - // Create a read-only transaction and observe it's in the list of active - // transactions. - let tx = env.begin_ro_txn().unwrap(); - - let tx_ptr = tx.txn_execute(|ptr| ptr as usize).unwrap(); - assert!(read_transactions.active.contains_key(&tx_ptr)); - - // Wait until the transaction is timed out by the manager. - sleep(MAX_DURATION + READ_TRANSACTIONS_CHECK_INTERVAL); - - // Ensure that the transaction is not in the list of active transactions anymore, - // and is in the list of timed out but not aborted transactions. - assert!(!read_transactions.active.contains_key(&tx_ptr)); - assert!(read_transactions.timed_out_not_aborted.contains(&tx_ptr)); - - // Use the timed out transaction and observe the `Error::ReadTransactionTimeout` - assert_eq!(tx.open_db(None).err(), Some(MdbxError::ReadTransactionTimeout)); - assert!(!read_transactions.active.contains_key(&tx_ptr)); - assert!(read_transactions.timed_out_not_aborted.contains(&tx_ptr)); - - assert_eq!(tx.id().err(), Some(MdbxError::ReadTransactionTimeout)); - assert!(!read_transactions.active.contains_key(&tx_ptr)); - assert!(read_transactions.timed_out_not_aborted.contains(&tx_ptr)); - - // Ensure that the transaction pointer is not reused when opening a new read-only - // transaction. 
- let new_tx = env.begin_ro_txn().unwrap(); - let new_tx_ptr = new_tx.txn_execute(|ptr| ptr as usize).unwrap(); - assert!(read_transactions.active.contains_key(&new_tx_ptr)); - assert_ne!(tx_ptr, new_tx_ptr); - - // Drop the transaction and ensure that it's not in the list of timed out but not - // aborted transactions anymore. - drop(tx); - assert!(!read_transactions.timed_out_not_aborted.contains(&tx_ptr)); - } - } - - #[test] - fn txn_manager_read_transactions_duration_unbounded() { - let dir = tempdir().unwrap(); - let env = Environment::builder() - .set_max_read_transaction_duration(MaxReadTransactionDuration::Unbounded) - .open(dir.path()) - .unwrap(); - - assert!(env.txn_manager().read_transactions.is_none()); - - let tx = env.begin_ro_txn().unwrap(); - sleep(READ_TRANSACTIONS_CHECK_INTERVAL); - tx.commit().unwrap(); - } + }; + std::thread::Builder::new().name("mdbx-rs-txn-manager".to_string()).spawn(task).unwrap(); } } diff --git a/src/tx/access.rs b/src/tx/access.rs index c6a938b..a9c973a 100644 --- a/src/tx/access.rs +++ b/src/tx/access.rs @@ -1,27 +1,26 @@ use crate::{ - Environment, MdbxResult, TransactionKind, - sys::txn_manager::{RawTxPtr, TxnManagerMessage}, + Environment, + sys::txn_manager::{Abort, RawTxPtr}, }; -use core::{fmt, marker::PhantomData}; +use core::fmt; use parking_lot::{Mutex, MutexGuard}; -use std::{ - ops, - sync::{ - Arc, - atomic::{AtomicBool, Ordering}, - mpsc::sync_channel, - }, +use std::sync::{ + Arc, + atomic::{AtomicBool, Ordering}, + mpsc::sync_channel, }; +use tracing::debug_span; mod sealed { - use super::*; - #[allow(unreachable_pub)] pub trait Sealed {} - impl Sealed for super::RoGuard {} - impl Sealed for super::RwUnsync {} - impl Sealed for super::PtrSyncInner {} - impl Sealed for super::PtrSync {} + impl Sealed for super::PtrUnsync {} + impl Sealed for super::PtrSync {} + + impl Sealed for &T where T: super::TxPtrAccess {} + impl Sealed for &mut T where T: super::TxPtrAccess {} + impl Sealed for std::sync::Arc where 
T: super::TxPtrAccess {} + impl Sealed for Box where T: super::TxPtrAccess {} } /// Trait for accessing the transaction pointer. @@ -32,347 +31,97 @@ mod sealed { /// and ownership semantics. #[allow(unreachable_pub)] pub trait TxPtrAccess: fmt::Debug + sealed::Sealed { - /// Execute a closure with the transaction pointer. - fn with_txn_ptr(&self, f: F) -> MdbxResult + /// Create an instance of the implementing type from a raw transaction + /// pointer. + fn from_ptr_and_env(ptr: *mut ffi::MDBX_txn, env: Environment, is_read_only: bool) -> Self where - F: FnOnce(*mut ffi::MDBX_txn) -> R; + Self: Sized; - /// Execute a closure with the transaction pointer, attempting to renew - /// the transaction if it has timed out. - /// - /// This is primarily used for cleanup operations (like closing cursors) - /// that need to succeed even after a timeout. For implementations that - /// don't support renewal (like `RoGuard` after the Arc is dropped), this - /// falls back to `with_txn_ptr`. - fn with_txn_ptr_for_cleanup(&self, f: F) -> MdbxResult + /// Execute a closure with the transaction pointer. + fn with_txn_ptr(&self, f: F) -> R where - F: FnOnce(*mut ffi::MDBX_txn) -> R, - { - // Default: just use the normal path - self.with_txn_ptr(f) - } + F: FnOnce(*mut ffi::MDBX_txn) -> R; /// Mark the transaction as committed. fn mark_committed(&self); -} -/// Wrapper for raw txn pointer for RW transactions. -pub struct RwUnsync { - committed: AtomicBool, - ptr: *mut ffi::MDBX_txn, -} - -impl fmt::Debug for RwUnsync { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RwUnsync").field("committed", &self.committed).finish() + /// Get the transaction ID by making a call into the MDBX C API. + fn tx_id(&self) -> Option { + let mut id = 0; + self.with_txn_ptr(|ptr| { + id = unsafe { ffi::mdbx_txn_id(ptr) as usize }; + }); + // 0 indicates the transaction is not valid + (id != 0).then_some(id) } } -impl RwUnsync { - /// Create a new [`RwUnsync`]. 
- pub(crate) const fn new(ptr: *mut ffi::MDBX_txn) -> Self { - Self { committed: AtomicBool::new(false), ptr } +impl TxPtrAccess for Arc +where + T: TxPtrAccess, +{ + fn from_ptr_and_env(ptr: *mut ffi::MDBX_txn, env: Environment, is_read_only: bool) -> Self + where + Self: Sized, + { + T::from_ptr_and_env(ptr, env, is_read_only).into() } -} -impl TxPtrAccess for RwUnsync { - fn with_txn_ptr(&self, f: F) -> MdbxResult + fn with_txn_ptr(&self, f: F) -> R where F: FnOnce(*mut ffi::MDBX_txn) -> R, { - Ok(f(self.ptr)) + self.as_ref().with_txn_ptr(f) } fn mark_committed(&self) { - // SAFETY: - // Type is neither Sync nor Send, so no concurrent access is possible. - unsafe { *self.committed.as_ptr() = true }; - } -} - -impl Drop for RwUnsync { - fn drop(&mut self) { - // SAFETY: - // We have exclusive ownership of this pointer. - unsafe { - if !*self.committed.as_ptr() { - ffi::mdbx_txn_abort(self.ptr); - } - } - } -} - -/// Wrapper for raw txn pointer that calls abort on drop. -/// -/// Used by the timeout mechanism - when the Arc is dropped, the transaction -/// is aborted. -pub(crate) struct RoTxPtr { - ptr: *mut ffi::MDBX_txn, -} - -impl fmt::Debug for RoTxPtr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RoTxPtr").finish() - } -} - -#[cfg(feature = "read-tx-timeouts")] -impl Drop for RoTxPtr { - fn drop(&mut self) { - // SAFETY: - // We have exclusive ownership of this pointer. - // This is guaranteed by the Arc mechanism in RoGuard. - unsafe { - ffi::mdbx_txn_abort(self.ptr); - } + self.as_ref().mark_committed(); } } -impl From<*mut ffi::MDBX_txn> for RoTxPtr { - fn from(txn: *mut ffi::MDBX_txn) -> Self { - Self { ptr: txn } - } -} - -// SAFETY: -// The RO transaction can be sent between threads, but not shared. RO -// transactions are not Sync because operations must be serialized. -unsafe impl Send for RoTxPtr {} - -// SAFETY -// Usage within this crate MUST ensure that RoTxPtr is not used concurrently. 
-// Implementing Sync here allows RoTxPtr to be held in the Arc that we use for -// timeouts. -unsafe impl Sync for RoTxPtr {} - -#[cfg(feature = "read-tx-timeouts")] -type WeakRoTxPtr = std::sync::Weak; - -type PhantomUnsync = PhantomData std::cell::Cell<()>>; - -/// Guard that keeps a RO transaction alive. -/// -/// This type MUST NOT be Sync, to prevent concurrent use of the underlying RO -/// tx pointer. -pub struct RoGuard { - /// Strong reference to keep the transaction alive. - strong: Option>, - - /// Weak reference for timeout case. - #[cfg(feature = "read-tx-timeouts")] - weak: WeakRoTxPtr, - - /// Whether the transaction was committed. +/// Wrapper for raw txn pointer for RW transactions. +pub struct PtrUnsync { committed: AtomicBool, - - /// Marker to prevent Sync implementation. - _unsync: PhantomUnsync, + ptr: *mut ffi::MDBX_txn, } -impl fmt::Debug for RoGuard { +impl fmt::Debug for PtrUnsync { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RoGuard").field("committed", &self.committed).finish() - } -} - -impl RoGuard { - /// Create a new RoGuard with no timeout (we keep the Arc). - /// - /// # Warning - /// - /// RO transactions consume resources while open. Disabling the timeout - /// without closing the transaction may lead to resource exhaustion if - /// done excessively. - #[cfg_attr(feature = "read-tx-timeouts", allow(dead_code))] - pub(crate) fn new_no_timeout(ptr: RoTxPtr) -> Self { - let arc = std::sync::Arc::new(ptr); - - #[cfg(feature = "read-tx-timeouts")] - let weak = std::sync::Arc::downgrade(&arc); - - Self { - strong: Some(arc), - - #[cfg(feature = "read-tx-timeouts")] - weak, - - committed: AtomicBool::new(false), - - _unsync: PhantomData, - } - } - - /// Create a new RoGuard with a timeout. After the timeout, the transaction - /// will be aborted (unless a [`Self::with_txn_ptr`] call is in progress). 
- #[cfg(feature = "read-tx-timeouts")] - pub(crate) fn new_with_timeout(ptr: RoTxPtr, duration: std::time::Duration) -> Self { - let arc = std::sync::Arc::new(ptr); - let weak = std::sync::Arc::downgrade(&arc); - std::thread::spawn(move || { - std::thread::sleep(duration); - // Drop the Arc, aborting the transaction. - drop(arc); - }); - - Self { strong: None, weak, committed: AtomicBool::new(false), _unsync: PhantomData } - } - - /// Try to get a strong reference to the transaction pointer. - pub(crate) fn try_ref(&self) -> Option> { - // SAFETY: - // Type is not Sync. So no concurrent access is possible. - if unsafe { *self.committed.as_ptr() } { - return None; - } - - if let Some(strong) = &self.strong { - return Some(strong.clone()); - } - - #[cfg(feature = "read-tx-timeouts")] - { - self.weak.upgrade() - } - - #[cfg(not(feature = "read-tx-timeouts"))] - { - None - } - } - - /// Attempt to upgrade the weak reference to a strong one, disabling the - /// timeout. On success, the transaction will remain valid until this guard - /// is dropped. - /// - /// # Warning - /// - /// RO transactions consume resources while open. Disabling the timeout - /// without closing the transaction may lead to resource exhaustion if - /// done excessively. - #[cfg(feature = "read-tx-timeouts")] - pub(crate) fn try_disable_timer(&mut self) -> MdbxResult<()> { - if self.strong.is_some() { - return Ok(()); - } - if let Some(arc) = self.weak.upgrade() { - self.strong = Some(arc); - return Ok(()); - } - Err(crate::MdbxError::ReadTransactionTimeout) + f.debug_struct("PtrUnsync").field("committed", &self.committed).finish() } } -impl TxPtrAccess for RoGuard { - /// Execute a closure with the transaction pointer, failing if timed out. - /// - /// Calling this function will ensure that the transaction is still valid - /// until the closure returns. If the closure returns an error, it will be - /// propagated. 
- /// - /// # Warnings - /// - /// The closure CAN NOT store the pointer or references derived from it, as - /// they may become invalid if the transaction times out. - /// - /// The closure prevents the transaction from timing out while it is - /// executing. The closure is expected to be short-lived to avoid holding - /// open resources. - /// - /// The `&mut self` receiver ensures that concurrent calls to this method - /// are not possible, preventing data races on the underlying transaction. - /// This is a HARD REQUIREMENT for safety. - fn with_txn_ptr(&self, f: F) -> MdbxResult +impl TxPtrAccess for PtrUnsync { + fn from_ptr_and_env(ptr: *mut ffi::MDBX_txn, _env: Environment, _is_read_only: bool) -> Self where - F: FnOnce(*mut ffi::MDBX_txn) -> R, + Self: Sized, { - #[cfg(feature = "read-tx-timeouts")] - { - // Fast path: if we own it, use directly. - // This is ALWAYS the case without timeouts. - if let Some(strong) = self.try_ref() { - return Ok(f(strong.ptr)); - } - Err(crate::MdbxError::ReadTransactionTimeout) - } - - #[cfg(not(feature = "read-tx-timeouts"))] - { - let Some(arc) = self.try_ref() else { unreachable!() }; - Ok(f(arc.ptr)) - } + Self { committed: AtomicBool::new(false), ptr } } - /// Execute a cleanup closure, even if the transaction has timed out. - /// - /// When a transaction times out, the Arc is dropped and the transaction is - /// aborted. However, cursors still need to be closed to free memory. This - /// method ensures the cleanup closure runs regardless of timeout status. - /// - /// If the transaction has timed out, a null pointer is passed since the - /// transaction no longer exists. Cleanup operations (like cursor close) - /// don't actually need the transaction pointer. - fn with_txn_ptr_for_cleanup(&self, f: F) -> MdbxResult + fn with_txn_ptr(&self, f: F) -> R where F: FnOnce(*mut ffi::MDBX_txn) -> R, { - #[cfg(feature = "read-tx-timeouts")] - { - // If we can get the strong ref, use it normally. 
- if let Some(strong) = self.try_ref() { - return Ok(f(strong.ptr)); - } - // Transaction timed out and was aborted. Still run cleanup with - // null pointer - cursor close doesn't need a valid txn. - Ok(f(std::ptr::null_mut())) - } - - #[cfg(not(feature = "read-tx-timeouts"))] - { - // Without timeouts, we always have the Arc. - let Some(arc) = self.try_ref() else { unreachable!() }; - Ok(f(arc.ptr)) - } + f(self.ptr) } fn mark_committed(&self) { // SAFETY: - // Type is not Sync. So no concurrent access is possible. + // Type is neither Sync nor Send, so no concurrent access is possible. unsafe { *self.committed.as_ptr() = true }; } } -/// A shareable, thread-safe pointer to an MDBX transaction. -pub(crate) struct PtrSync { - inner: Arc>, -} - -impl fmt::Debug for PtrSync { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("PtrSync") - .field("txn", &(self.inner.txn as usize)) - .field("committed", &self.inner.committed) - .finish() - } -} - -impl ops::Deref for PtrSync { - type Target = PtrSyncInner; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl Clone for PtrSync { - fn clone(&self) -> Self { - Self { inner: Arc::clone(&self.inner) } - } -} - -impl PtrSync { - /// Create a new PtrSync. - pub(crate) fn new(env: Environment, txn: *mut ffi::MDBX_txn) -> Self { - Self { inner: Arc::new(PtrSyncInner::new(env, txn)) } +impl Drop for PtrUnsync { + fn drop(&mut self) { + // SAFETY: + // We have exclusive ownership of this pointer. + unsafe { + if !*self.committed.as_ptr() { + ffi::mdbx_txn_abort(self.ptr); + } + } } } @@ -383,7 +132,7 @@ impl PtrSync { /// /// [`TxSync`]: crate::tx::TxSync #[derive(Debug)] -pub struct PtrSyncInner { +pub struct PtrSync { /// Raw pointer to the MDBX transaction. txn: *mut ffi::MDBX_txn, @@ -392,56 +141,25 @@ pub struct PtrSyncInner { /// Contains a lock to ensure exclusive access to the transaction. /// The inner boolean indicates the timeout status. 
- lock: Mutex, + lock: Mutex<()>, /// The environment that owns the transaction. env: Environment, - /// Tracing span for this transaction's lifecycle. - span: tracing::Span, - - /// Marker for the transaction kind. - _marker: PhantomData K>, + /// Whether the transaction is read-only. + is_read_only: bool, } -impl PtrSyncInner { - /// Create a new PtrSyncInner. - pub(crate) fn new(env: Environment, txn: *mut ffi::MDBX_txn) -> Self { - // Record txn_id after creation - let txn_id = unsafe { ffi::mdbx_txn_id(txn) }; - - let span = tracing::debug_span!( - target: "libmdbx", - "mdbx_txn", - kind = %if K::IS_READ_ONLY { "ro" } else { "rw" }, - txn_id = tracing::field::Empty, - ); - span.record("txn_id", txn_id); +// SAFETY: Access to the transaction is synchronized by the lock. +unsafe impl Send for PtrSync {} - Self { - txn, - committed: AtomicBool::new(false), - lock: Mutex::new(false), - env, - _marker: PhantomData, - span, - } - } - - /// Returns the raw pointer to the MDBX transaction. - /// - /// # Safety - /// - /// The caller MUST NOT perform any mdbx operations on the returned pointer - /// unless the caller ALSO holds the lock returned by [`Self::lock`]. - #[cfg(feature = "read-tx-timeouts")] - pub(crate) const unsafe fn txn_ptr(&self) -> *mut ffi::MDBX_txn { - self.txn - } +// SAFETY: Access to the transaction is synchronized by the lock. +unsafe impl Sync for PtrSync {} +impl PtrSync { /// Acquires the inner transaction lock to guarantee exclusive access to the transaction /// pointer. - pub(crate) fn lock(&self) -> MutexGuard<'_, bool> { + pub(crate) fn lock(&self) -> MutexGuard<'_, ()> { if let Some(lock) = self.lock.try_lock() { lock } else { @@ -455,69 +173,28 @@ impl PtrSyncInner { self.lock.lock() } } - - /// Executes the given closure once the lock on the transaction is acquired. - /// - /// Returns the result of the closure or an error if the transaction is - /// timed out. 
- #[inline] - pub(crate) fn txn_execute_fail_on_timeout(&self, f: F) -> MdbxResult - where - F: FnOnce(*mut ffi::MDBX_txn) -> T, - { - self.with_txn_ptr(f) - } - - /// Executes the given closure once the lock on the transaction is - /// acquired. If the transaction is timed out, it will be renewed first. - /// - /// Returns the result of the closure or an error if the transaction renewal fails. - #[inline] - pub(crate) fn txn_execute_renew_on_timeout(&self, f: F) -> MdbxResult - where - F: FnOnce(*mut ffi::MDBX_txn) -> T, - { - let _lck = self.lock(); - - // To be able to do any operations on the transaction, we need to renew it first. - #[cfg(feature = "read-tx-timeouts")] - if *_lck { - use crate::error::mdbx_result; - mdbx_result(unsafe { ffi::mdbx_txn_renew(self.txn) })?; - } - - Ok((f)(self.txn)) - } - - /// Returns a reference to the environment that owns this transaction. - pub(crate) const fn env(&self) -> &Environment { - &self.env - } - - /// Returns the tracing span for this transaction. 
- pub(crate) const fn span(&self) -> &tracing::Span { - &self.span - } } -impl TxPtrAccess for PtrSyncInner { - fn with_txn_ptr(&self, f: F) -> MdbxResult +impl TxPtrAccess for PtrSync { + fn from_ptr_and_env(ptr: *mut ffi::MDBX_txn, env: Environment, is_read_only: bool) -> Self where - F: FnOnce(*mut ffi::MDBX_txn) -> R, + Self: Sized, { - let timeout_flag = self.lock(); - if *timeout_flag { - return Err(crate::MdbxError::ReadTransactionTimeout); + Self { + committed: AtomicBool::new(false), + lock: Mutex::new(()), + txn: ptr, + env, + is_read_only, } - let result = f(self.txn); - Ok(result) } - fn with_txn_ptr_for_cleanup(&self, f: F) -> MdbxResult + fn with_txn_ptr(&self, f: F) -> R where F: FnOnce(*mut ffi::MDBX_txn) -> R, { - self.txn_execute_renew_on_timeout(f) + let _lock = self.lock(); + f(self.txn) } fn mark_committed(&self) { @@ -525,41 +202,26 @@ impl TxPtrAccess for PtrSyncInner { } } -impl Drop for PtrSyncInner { +impl Drop for PtrSync { fn drop(&mut self) { if self.committed.load(Ordering::SeqCst) { return; } - let _guard = self.span().enter(); - tracing::debug!(target: "libmdbx", "aborted"); - - // RO transactions can be aborted directly. - if K::IS_READ_ONLY { - #[cfg(feature = "read-tx-timeouts")] - self.env.txn_manager().remove_active_read_transaction(self.txn); - - unsafe { - ffi::mdbx_txn_abort(self.txn); - } + if self.is_read_only { + // RO: direct abort is safe and fast. + // SAFETY: We have exclusive ownership of this pointer. + unsafe { ffi::mdbx_txn_abort(self.txn) }; } else { - // RW transactions need to be aborted via the txn manager. + // RW: must go through txn manager for thread safety. 
let (sender, rx) = sync_channel(0); - self.env - .txn_manager() - .send_message(TxnManagerMessage::Abort { tx: RawTxPtr(self.txn), sender }); + self.env.txn_manager().send(Abort { + tx: RawTxPtr(self.txn), + sender, + span: debug_span!("txn_manager_abort"), + }); rx.recv().unwrap().unwrap(); } - } -} - -#[cfg(test)] -mod test { - use crate::tx::RoGuard; - - // Compile-time check: RO is Send - const fn _assert_ro_send() { - const fn _assert_send() {} - _assert_send::(); + tracing::debug!(target: "libmdbx", "aborted"); } } diff --git a/src/tx/cache.rs b/src/tx/cache.rs index ac4f279..858a850 100644 --- a/src/tx/cache.rs +++ b/src/tx/cache.rs @@ -1,16 +1,51 @@ -use std::hash::{Hash, Hasher}; +//! Caches for [`Database`] info, used by the [`TxSync`] and [`TxUnsync`] types. +//! +//! This module defines cache types for storing database handles within +//! transactions. Caches improve performance by avoiding repeated lookups of +//! database information. +//! +//! The primary caches are: +//! - [`DbCache`]: A simple inline cache using `SmallVec` for efficient storage +//! of a small number of database handles. Used in unsynchronized +//! transactions via [`RefCell`]. +//! - [`SharedCache`]: A thread-safe cache using `Arc>` for +//! synchronized transactions. +//! +//! [`TxSync`]: crate::tx::TxSync +//! [`TxUnsync`]: crate::tx::TxUnsync +use crate::Database; use parking_lot::RwLock; use smallvec::SmallVec; +use std::{ + cell::RefCell, + hash::{Hash, Hasher}, + sync::Arc, +}; -use crate::Database; +/// Cache trait for transaction-local database handles. +/// +/// This is used by the [`SyncKind`] trait to define the cache type for each +/// transaction kind. +/// +/// [`SyncKind`]: crate::tx::kind::SyncKind +pub trait Cache: Clone + Default + std::fmt::Debug { + /// Read a database entry from the cache. + fn read_db(&self, name_hash: u64) -> Option; + + /// Write a database entry to the cache. 
+ fn write_db(&self, db: CachedDb); + + /// Remove a database entry from the cache by dbi. + fn remove_dbi(&self, dbi: ffi::MDBX_dbi); +} /// Cached database entry. /// /// Uses hash-only comparison since 64-bit hash collisions are negligible /// for practical database counts. #[derive(Debug, Clone, Copy)] -pub(crate) struct CachedDb { +pub struct CachedDb { /// Hash of database name (None hashes distinctly from any string). name_hash: u64, /// The cached database (dbi + flags). @@ -43,11 +78,11 @@ impl From for Database { /// Uses inline storage for the common case (most apps use < 16 databases). #[derive(Debug, Default, Clone)] #[repr(transparent)] -pub(crate) struct DbCache(SmallVec<[CachedDb; 16]>); +pub struct DbCache(SmallVec<[CachedDb; 16]>); impl DbCache { /// Read a database entry from the cache. - pub(crate) fn read_db(&self, name_hash: u64) -> Option { + fn read_db(&self, name_hash: u64) -> Option { for entry in self.0.iter() { if entry.name_hash == name_hash { return Some(entry.db); @@ -57,7 +92,7 @@ impl DbCache { } /// Write a database entry to the cache. - pub(crate) fn write_db(&mut self, db: CachedDb) { + fn write_db(&mut self, db: CachedDb) { for entry in self.0.iter() { if entry.name_hash == db.name_hash { return; // Another thread beat us @@ -67,7 +102,7 @@ impl DbCache { } /// Remove a database entry from the cache by dbi. - pub(crate) fn remove_dbi(&mut self, dbi: ffi::MDBX_dbi) { + fn remove_dbi(&mut self, dbi: ffi::MDBX_dbi) { self.0.retain(|entry| entry.db.dbi() != dbi); } } @@ -75,15 +110,15 @@ impl DbCache { /// Simple cache container for database handles. /// /// Uses inline storage for the common case (most apps use < 16 databases). -#[derive(Debug)] -pub(crate) struct SharedCache { - cache: RwLock, +#[derive(Debug, Clone)] +pub struct SharedCache { + cache: Arc>, } impl SharedCache { /// Creates a new empty cache. 
fn new() -> Self { - Self { cache: RwLock::new(DbCache::default()) } + Self { cache: Arc::new(RwLock::new(DbCache::default())) } } /// Returns a read guard to the cache. @@ -95,21 +130,23 @@ impl SharedCache { fn write(&self) -> parking_lot::RwLockWriteGuard<'_, DbCache> { self.cache.write() } +} +impl Cache for SharedCache { /// Read a database entry from the cache. - pub(crate) fn read_db(&self, name_hash: u64) -> Option { + fn read_db(&self, name_hash: u64) -> Option { let cache = self.read(); cache.read_db(name_hash) } /// Write a database entry to the cache. - pub(crate) fn write_db(&self, db: CachedDb) { + fn write_db(&self, db: CachedDb) { let mut cache = self.write(); cache.write_db(db); } /// Remove a database entry from the cache by dbi. - pub(crate) fn remove_dbi(&self, dbi: ffi::MDBX_dbi) { + fn remove_dbi(&self, dbi: ffi::MDBX_dbi) { let mut cache = self.write(); cache.remove_dbi(dbi); } @@ -120,3 +157,23 @@ impl Default for SharedCache { Self::new() } } + +impl Cache for RefCell { + /// Read a database entry from the cache. + fn read_db(&self, name_hash: u64) -> Option { + let cache = self.borrow(); + cache.read_db(name_hash) + } + + /// Write a database entry to the cache. + fn write_db(&self, db: CachedDb) { + let mut cache = self.borrow_mut(); + cache.write_db(db); + } + + /// Remove a database entry from the cache by dbi. 
+ fn remove_dbi(&self, dbi: ffi::MDBX_dbi) { + let mut cache = self.borrow_mut(); + cache.remove_dbi(dbi); + } +} diff --git a/src/tx/cursor.rs b/src/tx/cursor.rs index 48ec816..7c4672a 100644 --- a/src/tx/cursor.rs +++ b/src/tx/cursor.rs @@ -1,10 +1,11 @@ use crate::{ - Database, MdbxError, RW, ReadResult, TableObject, TransactionKind, codec_try_optional, + Database, MdbxError, ReadResult, TableObject, TransactionKind, codec_try_optional, error::{MdbxResult, mdbx_result}, flags::*, tx::{ TxPtrAccess, assertions, iter::{Iter, IterDup, IterDupVals, IterKeyVals}, + kind::WriteMarker, }, }; use ffi::{ @@ -15,34 +16,44 @@ use ffi::{ }; use std::{ffi::c_void, fmt, marker::PhantomData, ptr}; +/// A read-only cursor for a synchronized transaction. +pub type RoCursorSync<'tx> = Cursor<'tx, crate::RoSync>; + +/// A read-write cursor for a synchronized transaction. +pub type RwCursorSync<'tx> = Cursor<'tx, crate::RwSync>; + +/// A read-only cursor for an unsynchronized transaction. +pub type RoCursorUnsync<'tx> = Cursor<'tx, crate::Ro>; + +/// A read-write cursor for an unsynchronized transaction. +pub type RwCursorUnsync<'tx> = Cursor<'tx, crate::Rw>; + /// A cursor for navigating the items within a database. /// /// The cursor is generic over the transaction kind `K` and the access type `A`. /// The access type determines how the cursor accesses the underlying transaction /// pointer, allowing the same cursor implementation to work with different /// transaction implementations. -pub struct Cursor<'tx, K, A> +pub struct Cursor<'tx, K> where K: TransactionKind, - A: TxPtrAccess, { - access: &'tx A, + access: &'tx K::Access, cursor: *mut ffi::MDBX_cursor, db: Database, _kind: PhantomData, } -impl<'tx, K, A> Cursor<'tx, K, A> +impl<'tx, K> Cursor<'tx, K> where K: TransactionKind, - A: TxPtrAccess, { /// Creates a new cursor from a reference to a transaction access type. 
- pub(crate) fn new(access: &'tx A, db: Database) -> MdbxResult { + pub(crate) fn new(access: &'tx K::Access, db: Database) -> MdbxResult { let mut cursor: *mut ffi::MDBX_cursor = ptr::null_mut(); access.with_txn_ptr(|txn_ptr| unsafe { mdbx_result(ffi::mdbx_cursor_open(txn_ptr, db.dbi(), &mut cursor)) - })??; + })?; Ok(Self { access, cursor, db, _kind: PhantomData }) } @@ -50,10 +61,11 @@ where /// /// This function must only be used when you are certain that the provided /// cursor pointer is valid and associated with the given access type. - pub(crate) const fn new_raw(access: &'tx A, cursor: *mut ffi::MDBX_cursor, db: Database) -> Self - where - A: Sized, - { + pub(crate) const fn new_raw( + access: &'tx K::Access, + cursor: *mut ffi::MDBX_cursor, + db: Database, + ) -> Self { Self { access, cursor, db, _kind: PhantomData } } @@ -74,10 +86,7 @@ where } /// Returns a reference to the transaction access type. - pub(crate) const fn access(&self) -> &'tx A - where - A: Sized, - { + pub(crate) const fn access(&self) -> &'tx K::Access { self.access } @@ -104,9 +113,7 @@ where /// This can be used to check if the cursor has valid data before /// performing operations that depend on cursor position. pub fn is_eof(&self) -> bool { - self.access - .with_txn_ptr(|_| unsafe { ffi::mdbx_cursor_eof(self.cursor) }) - .unwrap_or(ffi::MDBX_RESULT_TRUE) + self.access.with_txn_ptr(|_| unsafe { ffi::mdbx_cursor_eof(self.cursor) }) == ffi::MDBX_RESULT_TRUE } @@ -173,7 +180,7 @@ where let data_out = Value::decode_val::(txn, data_val)?; Ok((key_out, data_out, v)) } - })? + }) } fn get_value( @@ -453,7 +460,7 @@ where /// For databases with duplicate data items ([`DatabaseFlags::DUP_SORT`]), /// the duplicate data items of each key will be returned before moving on /// to the next key. 
- pub fn iter<'cur, Key, Value>(&'cur mut self) -> IterKeyVals<'tx, 'cur, K, A, Key, Value> + pub fn iter<'cur, Key, Value>(&'cur mut self) -> IterKeyVals<'tx, 'cur, K, Key, Value> where 'tx: 'cur, Key: TableObject<'tx>, @@ -474,7 +481,7 @@ where /// The iterator will begin with item next after the cursor, and continue /// until the end of the database. For new cursors, the iterator will begin /// with the first item in the database. - pub fn iter_slices<'cur>(&'cur mut self) -> IterKeyVals<'tx, 'cur, K, A> + pub fn iter_slices<'cur>(&'cur mut self) -> IterKeyVals<'tx, 'cur, K> where 'tx: 'cur, { @@ -488,7 +495,7 @@ where /// to the next key. pub fn iter_start<'cur, Key, Value>( &'cur mut self, - ) -> ReadResult> + ) -> ReadResult> where 'tx: 'cur, Key: TableObject<'tx>, @@ -509,7 +516,7 @@ where pub fn iter_from<'cur, Key, Value>( &'cur mut self, key: &[u8], - ) -> ReadResult> + ) -> ReadResult> where 'tx: 'cur, Key: TableObject<'tx>, @@ -534,7 +541,7 @@ where /// /// If the cursor is at EOF or not positioned (e.g., after exhausting a /// previous iteration), it will be repositioned to the first item. - pub fn iter_dup<'cur, Key, Value>(&'cur mut self) -> IterDup<'tx, 'cur, K, A, Key, Value> + pub fn iter_dup<'cur, Key, Value>(&'cur mut self) -> IterDup<'tx, 'cur, K, Key, Value> where Key: TableObject<'tx>, Value: TableObject<'tx>, @@ -552,7 +559,7 @@ where /// database. Each item will be returned as an iterator of its duplicates. 
pub fn iter_dup_start<'cur, Key, Value>( &'cur mut self, - ) -> ReadResult> + ) -> ReadResult> where 'tx: 'cur, Key: TableObject<'tx>, @@ -570,7 +577,7 @@ where pub fn iter_dup_from<'cur, Key, Value>( &'cur mut self, key: &[u8], - ) -> ReadResult> + ) -> ReadResult> where 'tx: 'cur, Key: TableObject<'tx>, @@ -588,7 +595,7 @@ where pub fn iter_dup_of<'cur, Key, Value>( &'cur mut self, key: &[u8], - ) -> ReadResult> + ) -> ReadResult> where 'tx: 'cur, Key: TableObject<'tx> + PartialEq, @@ -602,10 +609,7 @@ where } } -impl<'tx, A> Cursor<'tx, RW, A> -where - A: TxPtrAccess, -{ +impl<'tx, K: TransactionKind + WriteMarker> Cursor<'tx, K> { /// Puts a key/data pair into the database. The cursor will be positioned at /// the new data item, or on failure usually near it. pub fn put(&mut self, key: &[u8], data: &[u8], flags: WriteFlags) -> MdbxResult<()> { @@ -624,7 +628,7 @@ where }; let pagesize = stat.ms_psize as usize; assertions::debug_assert_put(pagesize, self.db.flags(), key, data); - })?; + }); let key_val: ffi::MDBX_val = ffi::MDBX_val { iov_len: key.len(), iov_base: key.as_ptr() as *mut c_void }; @@ -632,9 +636,8 @@ where ffi::MDBX_val { iov_len: data.len(), iov_base: data.as_ptr() as *mut c_void }; mdbx_result(self.access.with_txn_ptr(|_| unsafe { ffi::mdbx_cursor_put(self.cursor, &key_val, &mut data_val, flags.bits()) - })?)?; - - Ok(()) + })) + .map(drop) } /// Deletes the current key/data pair. @@ -646,10 +649,9 @@ where pub fn del(&mut self, flags: WriteFlags) -> MdbxResult<()> { mdbx_result( self.access - .with_txn_ptr(|_| unsafe { ffi::mdbx_cursor_del(self.cursor, flags.bits()) })?, - )?; - - Ok(()) + .with_txn_ptr(|_| unsafe { ffi::mdbx_cursor_del(self.cursor, flags.bits()) }), + ) + .map(drop) } /// Appends a key/data pair to the end of the database. @@ -688,9 +690,8 @@ where WriteFlags::APPEND.bits(), ) } - })?)?; - - Ok(()) + })) + .map(drop) } /// Appends duplicate data for [`DatabaseFlags::DUP_SORT`] databases. 
@@ -734,45 +735,39 @@ where WriteFlags::APPEND_DUP.bits(), ) } - })?)?; - - Ok(()) + })) + .map(drop) } } -impl<'tx, K, A> Clone for Cursor<'tx, K, A> +impl<'tx, K> Clone for Cursor<'tx, K> where K: TransactionKind, - A: TxPtrAccess, { fn clone(&self) -> Self { - self.access.with_txn_ptr(|_| Self::new_at_position(self).unwrap()).unwrap() + self.access.with_txn_ptr(|_| Self::new_at_position(self).unwrap()) } } -impl<'tx, K, A> fmt::Debug for Cursor<'tx, K, A> +impl<'tx, K> fmt::Debug for Cursor<'tx, K> where K: TransactionKind, - A: TxPtrAccess, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Cursor").finish_non_exhaustive() } } -impl<'tx, K, A> Drop for Cursor<'tx, K, A> +impl<'tx, K> Drop for Cursor<'tx, K> where K: TransactionKind, - A: TxPtrAccess, { fn drop(&mut self) { // MDBX cursors MUST be closed. Failure to do so is a memory leak. // // To be able to close a cursor of a timed out transaction, we need to // renew it first. Hence the usage of `with_txn_ptr_for_cleanup` here. - let _ = self - .access - .with_txn_ptr_for_cleanup(|_| unsafe { ffi::mdbx_cursor_close(self.cursor) }); + self.access.with_txn_ptr(|_| unsafe { ffi::mdbx_cursor_close(self.cursor) }); } } @@ -785,27 +780,5 @@ const fn slice_to_val(slice: Option<&[u8]>) -> ffi::MDBX_val { } } -unsafe impl<'tx, K, A> Send for Cursor<'tx, K, A> -where - K: TransactionKind, - A: TxPtrAccess + Sync, -{ -} -unsafe impl<'tx, K, A> Sync for Cursor<'tx, K, A> -where - K: TransactionKind, - A: TxPtrAccess + Sync, -{ -} - -/// A read-only cursor for a synchronized transaction. -pub type RoCursorSync<'tx> = Cursor<'tx, crate::RO, crate::tx::PtrSyncInner>; - -/// A read-write cursor for a synchronized transaction. -pub type RwCursorSync<'tx> = Cursor<'tx, crate::RW, crate::tx::PtrSyncInner>; - -/// A read-only cursor for an unsynchronized transaction. -pub type RoCursorUnsync<'tx> = Cursor<'tx, crate::RO, crate::tx::RoGuard>; - -/// A read-write cursor for an unsynchronized transaction. 
-pub type RwCursorUnsync<'tx> = Cursor<'tx, crate::RW, crate::tx::RwUnsync>; +unsafe impl<'tx, K> Send for Cursor<'tx, K> where K: TransactionKind {} +unsafe impl<'tx, K> Sync for Cursor<'tx, K> where K: TransactionKind {} diff --git a/src/tx/impl.rs b/src/tx/impl.rs new file mode 100644 index 0000000..9e9b609 --- /dev/null +++ b/src/tx/impl.rs @@ -0,0 +1,752 @@ +use crate::{ + CommitLatency, Cursor, Database, DatabaseFlags, Environment, MdbxError, MdbxResult, ReadResult, + Ro, Rw, Stat, TableObject, TransactionKind, WriteFlags, + error::mdbx_result, + sys::txn_manager::{Begin, Commit, CommitLatencyPtr, RawTxPtr}, + tx::{ + PtrSync, PtrUnsync, TxPtrAccess, + cache::{Cache, CachedDb}, + kind::{RoSync, RwSync, SyncKind, WriteMarker, WriterKind}, + ops, + }, +}; +use core::fmt; +use ffi::MDBX_commit_latency; +use smallvec::SmallVec; +use std::{ + ffi::CStr, + ptr, + sync::{Arc, mpsc::sync_channel}, + thread::sleep, + time::Duration, +}; +use tracing::{debug_span, instrument, warn}; + +/// Transaction type for synchronized access. +pub type TxSync = Tx>; + +/// Transaction type for unsynchronized access. +pub type TxUnsync = Tx; + +/// A synchronized read-only transaction. +pub type RoTxSync = TxSync; + +/// A synchronized read-write transaction. +pub type RwTxSync = TxSync; + +/// An unsynchronized read-only transaction. +pub type RoTxUnsync = TxUnsync; + +// SAFETY: +// - RoTxSync and RwTxSync use Arc which is Send and Sync. +// - K::Cache is ALWAYS Send +// - TxMeta is ALWAYS Send +// - Moving an RO transaction between threads is safe as long as no concurrent +// access occurs, which is guaranteed by being !Sync. +// +// NB: Send is correctly derived for RoTxSync and RwTxSync UNTIL +// you unsafe impl Sync for RoTxUnsync below. This is a quirk I did not know +// about. +unsafe impl Send for RoTxSync {} +unsafe impl Send for RwTxSync {} +unsafe impl Send for RoTxUnsync {} + +// // SAFETY: RoTxUnsync cannot be shared between threads, but can be moved. 
+// // This satisfies MDBX's requirements for read-only transactions. +// unsafe impl Send for RoTxUnsync {} + +/// An unsynchronized read-write transaction. +pub type RwTxUnsync = TxUnsync; + +/// Meta-data for a transaction. +#[derive(Clone)] +struct TxMeta { + env: Environment, + span: tracing::Span, +} + +impl fmt::Debug for TxMeta { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TxMeta").finish() + } +} + +/// An MDBX transaction. +/// +/// Prefer using the [`TxSync`] or [`TxUnsync`] type aliases, unless +/// specifically implementing generic code over all four transaction kinds. +pub struct Tx::Access> { + txn: U, + + cache: K::Cache, + + meta: TxMeta, +} + +impl fmt::Debug for Tx { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Tx").finish_non_exhaustive() + } +} + +impl Clone for Tx> +where + K: TransactionKind>, +{ + fn clone(&self) -> Self { + Self { txn: Arc::clone(&self.txn), cache: self.cache.clone(), meta: self.meta.clone() } + } +} + +impl Tx { + /// Creates a new transaction wrapper. + pub(crate) fn from_access_and_env(txn: K::Access, env: Environment) -> Self { + let span = K::new_span(txn.tx_id().unwrap_or_default()); + let meta = TxMeta { env, span }; + let cache = K::Cache::default(); + Self { txn, cache, meta } + } + + /// Creates a new transaction wrapper from raw pointer and environment. + pub(crate) fn from_ptr_and_env(ptr: *mut ffi::MDBX_txn, env: Environment) -> Self { + let tx = K::Access::from_ptr_and_env(ptr, env.clone(), K::IS_READ_ONLY); + Self::from_access_and_env(tx, env) + } + + /// Returns a reference to the environment. + #[inline(always)] + pub const fn env(&self) -> &Environment { + &self.meta.env + } + + /// Returns the tracing span for this transaction. 
+ #[inline(always)] + pub const fn span(&self) -> &tracing::Span { + &self.meta.span + } +} + +impl RoTxSync { + pub(crate) fn begin(env: Environment) -> Result { + let tx = RoSync::new_from_env(env.clone())?; + Ok(Self::from_access_and_env(tx, env)) + } +} + +impl RwTxUnsync { + pub(crate) fn begin(env: Environment) -> Result { + let tx = Rw::new_from_env(env.clone())?; + Ok(Self::from_access_and_env(tx, env)) + } +} + +impl RoTxUnsync { + pub(crate) fn begin(env: Environment) -> Result { + let tx = Ro::new_from_env(env.clone())?; + Ok(Self::from_access_and_env(tx, env)) + } +} + +// Unified implementations for all transaction kinds. +impl Tx +where + K: TransactionKind, +{ + /// Provides access to the raw transaction pointer. + fn with_txn_ptr(&self, f: F) -> R + where + F: FnOnce(*mut ffi::MDBX_txn) -> R, + { + self.txn.with_txn_ptr(f) + } + + /// Returns the transaction id. + #[inline(always)] + pub fn id(&self) -> MdbxResult { + self.with_txn_ptr(|txn_ptr| Ok(unsafe { ffi::mdbx_txn_id(txn_ptr) })) + } + + /// Gets an item from a database. + pub fn get<'a, Key>(&'a self, dbi: ffi::MDBX_dbi, key: &[u8]) -> ReadResult> + where + Key: TableObject<'a>, + { + self.with_txn_ptr(|txn_ptr| { + // SAFETY: txn_ptr is valid from with_txn_ptr. + unsafe { + let data_val = ops::get_raw(txn_ptr, dbi, key)?; + data_val.map(|val| Key::decode_val::(txn_ptr, val)).transpose() + } + }) + } + + /// Opens a handle to an MDBX database. + pub fn open_db(&self, name: Option<&str>) -> MdbxResult { + let name_hash = CachedDb::hash_name(name); + + if let Some(db) = self.cache.read_db(name_hash) { + return Ok(db); + } + + self.open_and_cache_with_flags(name, DatabaseFlags::empty()).map(Into::into) + } + + /// Opens a database handle without using the cache. 
+ pub fn open_db_no_cache(&self, name: Option<&str>) -> MdbxResult { + self.open_db_with_flags(name, DatabaseFlags::empty()).map(Into::into) + } + + fn open_and_cache_with_flags( + &self, + name: Option<&str>, + flags: DatabaseFlags, + ) -> MdbxResult { + let db = self.open_db_with_flags(name, flags)?; + self.cache.write_db(db); + Ok(db) + } + + fn open_db_with_flags(&self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { + let mut c_name_buf = SmallVec::<[u8; 32]>::new(); + let c_name = name.map(|n| { + c_name_buf.extend_from_slice(n.as_bytes()); + c_name_buf.push(0); + CStr::from_bytes_with_nul(&c_name_buf).unwrap() + }); + let name_ptr = c_name.as_ref().map_or(ptr::null(), |s| s.as_ptr()); + + let (dbi, db_flags) = self.with_txn_ptr(|txn_ptr| { + // SAFETY: txn_ptr is valid from with_txn_ptr, name_ptr is valid or null. + unsafe { ops::open_db_raw(txn_ptr, name_ptr, flags) } + })?; + + Ok(CachedDb::new(name, Database::new(dbi, db_flags))) + } + + /// Gets the option flags for the given database. + pub fn db_flags(&self, name: Option<&str>) -> MdbxResult { + let db = self.open_db(name)?; + self.db_flags_by_dbi(db.dbi()) + } + + /// Gets the option flags for the given database. + pub fn db_flags_by_dbi(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { + self.with_txn_ptr(|txn_ptr| { + // SAFETY: txn_ptr is valid from with_txn_ptr. + unsafe { ops::db_flags_raw(txn_ptr, dbi) } + }) + } + + /// Retrieves database statistics. + pub fn db_stat(&self, db: &Database) -> MdbxResult { + self.db_stat_by_dbi(db.dbi()) + } + + /// Retrieves database statistics by the given dbi. + pub fn db_stat_by_dbi(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { + self.with_txn_ptr(|txn| { + // SAFETY: txn is a valid transaction pointer from with_txn_ptr. + unsafe { ops::db_stat_raw(txn, dbi) } + }) + } + + /// Closes the database handle. 
+ /// + /// # Safety + /// + /// This will invalidate data cached in [`Database`] instances with the + /// DBI, and may result in bad behavior when using those instances after + /// calling this function. + pub unsafe fn close_db(&self, dbi: ffi::MDBX_dbi) -> MdbxResult<()> { + // SAFETY: Caller ensures no other references exist. + unsafe { ops::close_db_raw(self.meta.env.env_ptr(), dbi) }?; + self.cache.remove_dbi(dbi); + Ok(()) + } + + /// Opens a cursor on the given database. + /// + /// Multiple cursors can be open simultaneously on different databases + /// within the same transaction. The cursor borrows the transaction's + /// inner access type, allowing concurrent cursor operations. + pub fn cursor(&self, db: Database) -> MdbxResult> { + Cursor::new(&self.txn, db) + } +} + +// Write-only +impl Tx { + /// Opens a handle to an MDBX database, creating the database if necessary. + /// + /// If the database is already created, the given option flags will be + /// added to it. + /// + /// If `name` is [None], then the returned handle will be for the default + /// database. + /// + /// If `name` is not [None], then the returned handle will be for a named + /// database. In this case the environment must be configured to allow + /// named databases through [`EnvironmentBuilder::set_max_dbs()`]. + /// + /// This function will fail with [`MdbxError::BadRslot`] if called by a + /// thread with an open transaction. + /// + /// [`EnvironmentBuilder::set_max_dbs()`]: crate::EnvironmentBuilder::set_max_dbs + pub fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { + self.open_db_with_flags(name, flags | DatabaseFlags::CREATE).map(Into::into) + } + + /// Stores an item into a database. + /// + /// This function stores key/data pairs in the database. 
The default + /// behavior is to enter the new key/data pair, replacing any previously + /// existing key if duplicates are disallowed, or adding a duplicate data + /// item if duplicates are allowed ([`DatabaseFlags::DUP_SORT`]). + pub fn put( + &self, + db: Database, + key: impl AsRef<[u8]>, + data: impl AsRef<[u8]>, + flags: WriteFlags, + ) -> MdbxResult<()> { + let key = key.as_ref(); + let data = data.as_ref(); + + #[cfg(debug_assertions)] + { + use crate::tx::assertions; + + let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); + assertions::debug_assert_put(pagesize, db.flags(), key, data); + } + + self.with_txn_ptr(|txn| { + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { ops::put_raw(txn, db.dbi(), key, data, flags) } + }) + } + + /// Appends a key/data pair to the end of the database. + /// + /// The key must be greater than all existing keys (or less than, for + /// [`DatabaseFlags::REVERSE_KEY`] tables). This is more efficient than + /// [`TxSync::put`] when adding data in sorted order. + /// + /// In debug builds, this method asserts that the key ordering constraint is + /// satisfied. + pub fn append( + &self, + db: Database, + key: impl AsRef<[u8]>, + data: impl AsRef<[u8]>, + ) -> MdbxResult<()> { + let key = key.as_ref(); + let data = data.as_ref(); + + self.with_txn_ptr(|txn| { + #[cfg(debug_assertions)] + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { + ops::debug_assert_append(txn, db.dbi(), db.flags(), key, data); + } + + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { ops::put_raw(txn, db.dbi(), key, data, WriteFlags::APPEND) } + }) + } + + /// Appends duplicate data for [`DatabaseFlags::DUP_SORT`] databases. + /// + /// The data must be greater than all existing data for this key (or less + /// than, for [`DatabaseFlags::REVERSE_DUP`] tables). 
This is more efficient + /// than [`TxSync::put`] when adding duplicates in sorted order. + /// + /// Returns [`MdbxError::RequiresDupSort`] if the database does not have the + /// [`DatabaseFlags::DUP_SORT`] flag set. + /// + /// In debug builds, this method asserts that the data ordering constraint + /// is satisfied. + pub fn append_dup( + &self, + db: Database, + key: impl AsRef<[u8]>, + data: impl AsRef<[u8]>, + ) -> MdbxResult<()> { + if !db.flags().contains(DatabaseFlags::DUP_SORT) { + return Err(MdbxError::RequiresDupSort); + } + let key = key.as_ref(); + let data = data.as_ref(); + + self.with_txn_ptr(|txn| { + #[cfg(debug_assertions)] + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { + ops::debug_assert_append_dup(txn, db.dbi(), db.flags(), key, data); + } + + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { ops::put_raw(txn, db.dbi(), key, data, WriteFlags::APPEND_DUP) } + }) + } + + /// Returns a buffer which can be used to write a value into the item at the + /// given key and with the given length. The buffer must be completely + /// filled by the caller. + /// + /// This should not be used on dupsort tables. + /// + /// # Safety + /// + /// The caller must ensure that the returned buffer is not used after the + /// transaction is committed or aborted, or if another value is inserted. + /// To be clear: the second call to this function is not permitted while + /// the returned slice is reachable. 
+ #[allow(clippy::mut_from_ref)] + pub unsafe fn reserve( + &self, + db: Database, + key: impl AsRef<[u8]>, + len: usize, + flags: WriteFlags, + ) -> MdbxResult<&mut [u8]> { + let key = key.as_ref(); + + #[cfg(debug_assertions)] + { + use crate::tx::assertions; + + let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); + assertions::debug_assert_key(pagesize, db.flags(), key); + } + + let ptr = self.with_txn_ptr(|txn| { + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { ops::reserve_raw(txn, db.dbi(), key, len, flags) } + })?; + // SAFETY: ptr is valid from reserve_raw, len matches. + Ok(unsafe { ops::slice_from_reserved(ptr, len) }) + } + + /// Reserves space for a value of the given length at the given key, and + /// calls the given closure with a mutable slice to write into. + /// + /// This is a safe wrapper around [`TxSync::reserve`]. + pub fn with_reservation( + &self, + db: Database, + key: impl AsRef<[u8]>, + len: usize, + flags: WriteFlags, + f: impl FnOnce(&mut [u8]), + ) -> MdbxResult<()> { + let buf = unsafe { self.reserve(db, key, len, flags)? }; + f(buf); + Ok(()) + } + + /// Delete items from a database. + /// This function removes key/data pairs from the database. + /// + /// The data parameter is NOT ignored regardless the database does support + /// sorted duplicate data items or not. If the data parameter is [Some] + /// only the matching data item will be deleted. Otherwise, if data + /// parameter is [None], any/all value(s) for specified key will + /// be deleted. + /// + /// Returns `true` if the key/value pair was present. 
+ pub fn del( + &self, + db: Database, + key: impl AsRef<[u8]>, + data: Option<&[u8]>, + ) -> MdbxResult { + let key = key.as_ref(); + + #[cfg(debug_assertions)] + { + use crate::tx::assertions; + + let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); + assertions::debug_assert_key(pagesize, db.flags(), key); + if let Some(v) = data { + assertions::debug_assert_value(pagesize, db.flags(), v); + } + } + + self.with_txn_ptr(|txn| { + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { ops::del_raw(txn, db.dbi(), key, data) } + }) + } + + /// Empties the given database. All items will be removed. + pub fn clear_db(&self, db: Database) -> MdbxResult<()> { + self.with_txn_ptr(|txn| { + // SAFETY: txn is a valid RW transaction pointer from with_txn_ptr. + unsafe { ops::clear_db_raw(txn, db.dbi()) } + }) + } + + /// Drops the database from the environment. + /// + /// # Safety + /// + /// Caller must ensure no [`Cursor`] or other references to the database + /// exist. [`Database`] instances with the DBI will be invalidated, and + /// use after calling this function may result in bad behavior. + pub unsafe fn drop_db(&self, db: Database) -> MdbxResult<()> { + self.with_txn_ptr(|txn| { + // SAFETY: txn is a valid RW transaction pointer, caller ensures + // no other references to dbi exist. + unsafe { ops::drop_db_raw(txn, db.dbi()) } + })?; + + self.cache.remove_dbi(db.dbi()); + + Ok(()) + } +} + +// Differentiated Commit implementations for Sync and Unsync transaction +// pointers. +impl Tx> +where + K: TransactionKind>, +{ + /// Commits the transaction. + /// + /// Any pending operations will be saved. + /// + /// SAFETY: latency pointer must be valid for the duration of the commit. 
+ fn commit_inner(self, latency: *mut MDBX_commit_latency) -> MdbxResult<()> { + let was_aborted = self.with_txn_ptr(|txn| { + if K::IS_READ_ONLY { + mdbx_result(unsafe { ffi::mdbx_txn_commit_ex(txn, latency) }) + } else { + let (sender, rx) = sync_channel(0); + self.env().txn_manager().send(Commit { + tx: RawTxPtr(txn), + latency: CommitLatencyPtr(latency), + span: debug_span!("tx_manager_commit"), + sender, + }); + rx.recv().unwrap() + } + })?; + + self.txn.mark_committed(); + + if was_aborted { + tracing::warn!(target: "libmdbx", "botched"); + return Err(MdbxError::BotchedTransaction); + } + + Ok(()) + } + + /// Commits the transaction. + #[instrument(skip(self), parent = &self.meta.span)] + pub fn commit(self) -> MdbxResult<()> { + self.commit_inner(ptr::null_mut()) + } + + /// Commits the transaction, returning commit latency information. + #[instrument(skip(self), parent = &self.meta.span)] + pub fn commit_with_latency(self) -> MdbxResult { + let mut latency = CommitLatency::new(); + + self.commit_inner(latency.mdb_commit_latency())?; + + tracing::debug!(latency_whole_ms = latency.whole().as_millis(), "commit latency"); + Ok(latency) + } +} + +impl Tx +where + K: TransactionKind, +{ + /// Commits the transaction (inner implementation). + fn commit_inner(self, latency: *mut ffi::MDBX_commit_latency) -> MdbxResult<()> { + // Self is dropped at end of function, so RwTxPtr::drop will be within + // span scope. + let _guard = self.meta.span.clone().entered(); + + // SAFETY: txn_ptr is valid from with_txn_ptr. + let was_aborted = + self.with_txn_ptr(|txn_ptr| unsafe { ops::commit_raw(txn_ptr, latency) })?; + + self.txn.mark_committed(); + + if was_aborted { + tracing::warn!(target: "libmdbx", "botched"); + return Err(MdbxError::BotchedTransaction); + } + + Ok(()) + } + + /// Commits the transaction. 
+ #[instrument(skip(self), parent = &self.meta.span)] + pub fn commit(self) -> MdbxResult<()> { + self.commit_inner(ptr::null_mut()) + } + + /// Commits the transaction, returning commit latency information. + #[instrument(skip(self), parent = &self.meta.span)] + pub fn commit_with_latency(self) -> MdbxResult { + let mut latency = CommitLatency::new(); + + self.commit_inner(latency.mdb_commit_latency())?; + + tracing::debug!(latency_whole_ms = latency.whole().as_millis(), "commit latency"); + Ok(latency) + } +} + +// Differentiated nested transaction implementations for Sync and Unsync +// transaction pointers. +impl Tx> +where + K: TransactionKind> + WriteMarker, +{ + /// Begins a new [`RwTxSync`] transaction. + pub fn begin(env: Environment) -> MdbxResult { + let mut warned = false; + let txn = loop { + let (tx, rx) = sync_channel(0); + env.txn_manager().send(Begin { + parent: RawTxPtr(ptr::null_mut()), + flags: Rw::OPEN_FLAGS, + sender: tx, + span: debug_span!("txn_manager_begin"), + }); + let res = rx.recv().unwrap(); + if matches!(&res, Err(MdbxError::Busy)) { + if !warned { + warned = true; + warn!(target: "libmdbx", "Process stalled, awaiting read-write transaction lock."); + } + sleep(Duration::from_millis(250)); + continue; + } + + break res; + }?; + + Ok(Self::from_ptr_and_env(txn.0, env)) + } + + /// Begins a new nested transaction inside of this transaction. + pub fn begin_nested_txn(&self) -> MdbxResult { + if self.env().is_write_map() { + return Err(MdbxError::NestedTransactionsUnsupportedWithWriteMap); + } + self.with_txn_ptr(|txn| { + let (tx, rx) = sync_channel(0); + self.env().txn_manager().send(Begin { + parent: RawTxPtr(txn), + flags: Rw::OPEN_FLAGS, + sender: tx, + span: debug_span!("tx_manager_begin_nested"), + }); + + rx.recv().unwrap().map(|txn| Self::from_ptr_and_env(txn.0, self.env().clone())) + }) + } +} + +impl Tx +where + K: TransactionKind + WriteMarker, +{ + /// Begins a new nested transaction inside of this transaction. 
+ pub fn begin_nested_txn(&mut self) -> MdbxResult { + if self.env().is_write_map() { + return Err(MdbxError::NestedTransactionsUnsupportedWithWriteMap); + } + self.with_txn_ptr(|txn_ptr| { + // SAFETY: txn_ptr is valid from with_txn_ptr. + unsafe { + let mut nested_txn: *mut ffi::MDBX_txn = ptr::null_mut(); + mdbx_result(ffi::mdbx_txn_begin_ex( + self.env().env_ptr(), + txn_ptr, + Rw::OPEN_FLAGS, + &mut nested_txn, + ptr::null_mut(), + ))?; + Ok(Self::from_ptr_and_env(nested_txn, self.env().clone())) + } + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::tempdir; + + #[test] + fn test_basic_rw_operations() { + let dir = tempdir().unwrap(); + let env = Environment::builder().open(dir.path()).unwrap(); + + // Write data + let txn = TxUnsync::::begin(env.clone()).unwrap(); + let db = txn.create_db(None, DatabaseFlags::empty()).unwrap(); + txn.put(db, b"key1", b"value1", WriteFlags::empty()).unwrap(); + txn.put(db, b"key2", b"value2", WriteFlags::empty()).unwrap(); + txn.commit().unwrap(); + + // Read data + let txn = TxUnsync::::begin(env.clone()).unwrap(); + + let db = txn.open_db(None).unwrap(); + let value: Option> = txn.get(db.dbi(), b"key1").unwrap(); + assert_eq!(value.as_deref(), Some(b"value1".as_slice())); + + let value: Option> = txn.get(db.dbi(), b"key2").unwrap(); + assert_eq!(value.as_deref(), Some(b"value2".as_slice())); + + let value: Option> = txn.get(db.dbi(), b"nonexistent").unwrap(); + assert!(value.is_none()); + } + + #[test] + fn test_db_cache() { + let dir = tempdir().unwrap(); + let env = Environment::builder().set_max_dbs(10).open(dir.path()).unwrap(); + + // Create named DBs + { + let txn = TxUnsync::::begin(env.clone()).unwrap(); + txn.create_db(Some("db1"), DatabaseFlags::empty()).unwrap(); + txn.create_db(Some("db2"), DatabaseFlags::empty()).unwrap(); + txn.commit().unwrap(); + } + + let txn = TxUnsync::::begin(env.clone()).unwrap(); + + let db1_a = txn.open_db(Some("db1")).unwrap(); + let db1_b = 
txn.open_db(Some("db1")).unwrap(); + let db2 = txn.open_db(Some("db2")).unwrap(); + + assert_eq!(db1_a.dbi(), db1_b.dbi()); + assert_ne!(db1_a.dbi(), db2.dbi()); + } + + fn __compile_checks() { + fn assert_sync() {} + assert_sync::(); + assert_sync::(); + assert_sync::(); + + fn assert_send() {} + assert_send::(); + assert_send::(); + assert_send::(); + assert_send::(); + } +} diff --git a/src/tx/iter.rs b/src/tx/iter.rs index fa68d86..96636b6 100644 --- a/src/tx/iter.rs +++ b/src/tx/iter.rs @@ -44,7 +44,7 @@ //! # use signet_libmdbx::Environment; //! # use std::path::Path; //! # let env = Environment::builder().open(Path::new("/tmp/iter_example")).unwrap(); -//! let txn = env.begin_ro_txn().unwrap(); +//! let txn = env.begin_ro_sync().unwrap(); //! let db = txn.open_db(None).unwrap(); //! let mut cursor = txn.cursor(db).unwrap(); //! @@ -61,21 +61,37 @@ use crate::{ }; use std::{borrow::Cow, marker::PhantomData, ptr}; +/// A key-value iterator for a synchronized read-only transaction. +pub type RoIterSync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = + IterKeyVals<'tx, 'cur, crate::RoSync, Key, Value>; + +/// A key-value iterator for a synchronized read-write transaction. +pub type RwIterSync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = + IterKeyVals<'tx, 'cur, crate::RwSync, Key, Value>; + +/// A key-value iterator for an unsynchronized read-only transaction. +pub type RoIterUnsync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = + IterKeyVals<'tx, 'cur, crate::Ro, Key, Value>; + +/// A key-value iterator for an unsynchronized read-write transaction. +pub type RwIterUnsync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = + IterKeyVals<'tx, 'cur, crate::Rw, Key, Value>; + /// Iterates over KV pairs in an MDBX database. 
-pub type IterKeyVals<'tx, 'cur, K, A, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = - Iter<'tx, 'cur, K, A, Key, Value, { ffi::MDBX_NEXT }>; +pub type IterKeyVals<'tx, 'cur, K, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = + Iter<'tx, 'cur, K, Key, Value, { ffi::MDBX_NEXT }>; /// An iterator over the key/value pairs in an MDBX `DUPSORT` with duplicate /// keys, yielding the first value for each key. /// /// See the [`Iter`] documentation for more details. -pub type IterDupKeys<'tx, 'cur, K, A, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = - Iter<'tx, 'cur, K, A, Key, Value, { ffi::MDBX_NEXT_NODUP }>; +pub type IterDupKeys<'tx, 'cur, K, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = + Iter<'tx, 'cur, K, Key, Value, { ffi::MDBX_NEXT_NODUP }>; /// An iterator over the key/value pairs in an MDBX `DUPSORT`, yielding each /// duplicate value for a specific key. -pub type IterDupVals<'tx, 'cur, K, A, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = - Iter<'tx, 'cur, K, A, Key, Value, { ffi::MDBX_NEXT_DUP }>; +pub type IterDupVals<'tx, 'cur, K, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = + Iter<'tx, 'cur, K, Key, Value, { ffi::MDBX_NEXT_DUP }>; /// An iterator over the key/value pairs in an MDBX database. /// @@ -93,12 +109,11 @@ pub struct Iter< 'tx, 'cur, K: TransactionKind, - A: TxPtrAccess, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>, const OP: u32 = { ffi::MDBX_NEXT }, > { - cursor: Cow<'cur, Cursor<'tx, K, A>>, + cursor: Cow<'cur, Cursor<'tx, K>>, /// Pre-fetched value from cursor positioning, yielded before calling FFI. pending: Option<(Key, Value)>, /// When true, the iterator is exhausted and will always return `None`. 
@@ -106,10 +121,9 @@ pub struct Iter< _marker: PhantomData (Key, Value)>, } -impl core::fmt::Debug for Iter<'_, '_, K, A, Key, Value, OP> +impl core::fmt::Debug for Iter<'_, '_, K, Key, Value, OP> where K: TransactionKind, - A: TxPtrAccess, Key: core::fmt::Debug, Value: core::fmt::Debug, { @@ -118,62 +132,57 @@ where } } -impl<'tx: 'cur, 'cur, K, A, Key, Value, const OP: u32> Iter<'tx, 'cur, K, A, Key, Value, OP> +impl<'tx: 'cur, 'cur, K, Key, Value, const OP: u32> Iter<'tx, 'cur, K, Key, Value, OP> where K: TransactionKind, - A: TxPtrAccess, { /// Create a new iterator from the given cursor, starting at the given /// position. - pub(crate) fn new(cursor: Cow<'cur, Cursor<'tx, K, A>>) -> Self { + pub(crate) fn new(cursor: Cow<'cur, Cursor<'tx, K>>) -> Self { Iter { cursor, pending: None, exhausted: false, _marker: PhantomData } } /// Create a new iterator from a mutable reference to the given cursor, - pub(crate) fn from_ref(cursor: &'cur mut Cursor<'tx, K, A>) -> Self { + pub(crate) fn from_ref(cursor: &'cur mut Cursor<'tx, K>) -> Self { Self::new(Cow::Borrowed(cursor)) } /// Create a new iterator that is already exhausted. /// /// Iteration will immediately return `None`. - pub(crate) fn new_end(cursor: Cow<'cur, Cursor<'tx, K, A>>) -> Self { + pub(crate) fn new_end(cursor: Cow<'cur, Cursor<'tx, K>>) -> Self { Iter { cursor, pending: None, exhausted: true, _marker: PhantomData } } /// Create a new, exhausted iterator from a mutable reference to the given /// cursor. This is usually used as a placeholder when no items are to be /// yielded. - pub(crate) fn end_from_ref(cursor: &'cur mut Cursor<'tx, K, A>) -> Self { + pub(crate) fn end_from_ref(cursor: &'cur mut Cursor<'tx, K>) -> Self { Self::new_end(Cow::Borrowed(cursor)) } /// Create a new iterator from the given cursor, first yielding the /// provided key/value pair. 
- pub(crate) fn new_with(cursor: Cow<'cur, Cursor<'tx, K, A>>, first: (Key, Value)) -> Self { + pub(crate) fn new_with(cursor: Cow<'cur, Cursor<'tx, K>>, first: (Key, Value)) -> Self { Iter { cursor, pending: Some(first), exhausted: false, _marker: PhantomData } } /// Create a new iterator from a mutable reference to the given cursor, /// first yielding the provided key/value pair. - pub(crate) fn from_ref_with(cursor: &'cur mut Cursor<'tx, K, A>, first: (Key, Value)) -> Self { + pub(crate) fn from_ref_with(cursor: &'cur mut Cursor<'tx, K>, first: (Key, Value)) -> Self { Self::new_with(Cow::Borrowed(cursor), first) } /// Create a new iterator from an owned cursor, first yielding the /// provided key/value pair. - pub(crate) fn from_owned_with(cursor: Cursor<'tx, K, A>, first: (Key, Value)) -> Self - where - A: Sized, - { + pub(crate) fn from_owned_with(cursor: Cursor<'tx, K>, first: (Key, Value)) -> Self { Self::new_with(Cow::Owned(cursor), first) } } -impl Iter<'_, '_, K, A, Key, Value, OP> +impl Iter<'_, '_, K, Key, Value, OP> where K: TransactionKind, - A: TxPtrAccess, Key: TableObjectOwned, Value: TableObjectOwned, { @@ -189,10 +198,9 @@ where } } -impl<'tx: 'cur, 'cur, K, A, Key, Value, const OP: u32> Iter<'tx, 'cur, K, A, Key, Value, OP> +impl<'tx: 'cur, 'cur, K, Key, Value, const OP: u32> Iter<'tx, 'cur, K, Key, Value, OP> where K: TransactionKind, - A: TxPtrAccess, Key: TableObject<'tx>, Value: TableObject<'tx>, { @@ -221,7 +229,7 @@ where ffi::MDBX_NOTFOUND | ffi::MDBX_ENODATA | ffi::MDBX_RESULT_TRUE => Ok(None), other => Err(MdbxError::from_err_code(other).into()), } - })? + }) } /// Borrow the next key/value pair from the iterator. 
@@ -240,10 +248,9 @@ where } } -impl Iterator for Iter<'_, '_, K, A, Key, Value, OP> +impl Iterator for Iter<'_, '_, K, Key, Value, OP> where K: TransactionKind, - A: TxPtrAccess, Key: TableObjectOwned, Value: TableObjectOwned, { @@ -256,21 +263,13 @@ where /// An iterator over the key/value pairs in an MDBX database with duplicate /// keys. -pub struct IterDup< - 'tx, - 'cur, - K: TransactionKind, - A: TxPtrAccess, - Key = Cow<'tx, [u8]>, - Value = Cow<'tx, [u8]>, -> { - inner: IterDupKeys<'tx, 'cur, K, A, Key, Value>, +pub struct IterDup<'tx, 'cur, K: TransactionKind, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> { + inner: IterDupKeys<'tx, 'cur, K, Key, Value>, } -impl<'tx, 'cur, K, A, Key, Value> core::fmt::Debug for IterDup<'tx, 'cur, K, A, Key, Value> +impl<'tx, 'cur, K, Key, Value> core::fmt::Debug for IterDup<'tx, 'cur, K, Key, Value> where K: TransactionKind, - A: TxPtrAccess, Key: core::fmt::Debug, Value: core::fmt::Debug, { @@ -279,71 +278,63 @@ where } } -impl<'tx, 'cur, K, A, Key, Value> IterDup<'tx, 'cur, K, A, Key, Value> +impl<'tx, 'cur, K, Key, Value> IterDup<'tx, 'cur, K, Key, Value> where K: TransactionKind, - A: TxPtrAccess, { /// Create a new iterator from the given cursor, starting at the given /// position. - pub(crate) fn new(cursor: Cow<'cur, Cursor<'tx, K, A>>) -> Self { + pub(crate) fn new(cursor: Cow<'cur, Cursor<'tx, K>>) -> Self { IterDup { inner: IterDupKeys::new(cursor) } } /// Create a new iterator from a mutable reference to the given cursor, - pub(crate) fn from_ref(cursor: &'cur mut Cursor<'tx, K, A>) -> Self { + pub(crate) fn from_ref(cursor: &'cur mut Cursor<'tx, K>) -> Self { Self::new(Cow::Borrowed(cursor)) } /// Create a new iterator from an owned cursor. 
- pub fn from_owned(cursor: Cursor<'tx, K, A>) -> Self - where - A: Sized, - { + pub fn from_owned(cursor: Cursor<'tx, K>) -> Self { Self::new(Cow::Owned(cursor)) } /// Create a new iterator from the given cursor, the inner iterator will /// first yield the provided key/value pair. - pub(crate) fn new_with(cursor: Cow<'cur, Cursor<'tx, K, A>>, first: (Key, Value)) -> Self { + pub(crate) fn new_with(cursor: Cow<'cur, Cursor<'tx, K>>, first: (Key, Value)) -> Self { IterDup { inner: Iter::new_with(cursor, first) } } /// Create a new iterator from a mutable reference to the given cursor, /// first yielding the provided key/value pair. - pub fn from_ref_with(cursor: &'cur mut Cursor<'tx, K, A>, first: (Key, Value)) -> Self { + pub fn from_ref_with(cursor: &'cur mut Cursor<'tx, K>, first: (Key, Value)) -> Self { Self::new_with(Cow::Borrowed(cursor), first) } /// Create a new iterator from the given cursor, with no items to yield. - pub fn new_end(cursor: Cow<'cur, Cursor<'tx, K, A>>) -> Self { + pub fn new_end(cursor: Cow<'cur, Cursor<'tx, K>>) -> Self { IterDup { inner: Iter::new_end(cursor) } } /// Create a new iterator from a mutable reference to the given cursor, with /// no items to yield. - pub fn end_from_ref(cursor: &'cur mut Cursor<'tx, K, A>) -> Self { + pub fn end_from_ref(cursor: &'cur mut Cursor<'tx, K>) -> Self { Self::new_end(Cow::Borrowed(cursor)) } /// Create a new iterator from an owned cursor, with no items to yield. - pub fn end_from_owned(cursor: Cursor<'tx, K, A>) -> Self - where - A: Sized, - { + pub fn end_from_owned(cursor: Cursor<'tx, K>) -> Self { Self::new_end(Cow::Owned(cursor)) } } -impl<'tx: 'cur, 'cur, K, A, Key, Value> IterDup<'tx, 'cur, K, A, Key, Value> +impl<'tx: 'cur, 'cur, K, Key, Value> IterDup<'tx, 'cur, K, Key, Value> where K: TransactionKind, - A: TxPtrAccess + 'tx, Key: TableObject<'tx>, Value: TableObject<'tx>, { /// Borrow the next key/value pair from the iterator. 
- pub fn borrow_next(&mut self) -> ReadResult>> { + pub fn borrow_next(&mut self) -> ReadResult>> { // We want to use Cursor::new_at_position to create a new cursor, // but the kv pair may be borrowed from the inner cursor, so we need to // store the references first. This is just to avoid borrow checker @@ -353,8 +344,7 @@ where // SAFETY: the access lives as long as self.inner.cursor, and the cursor op // we perform does not invalidate the data borrowed from the inner // cursor in borrow_next. - let access: *const A = self.inner.cursor.access(); - let access = unsafe { access.as_ref().unwrap() }; + let access = self.inner.cursor.access(); // The next will be the FIRST KV pair for the NEXT key in the DUPSORT match self.inner.borrow_next()? { @@ -369,7 +359,7 @@ where let res = ffi::mdbx_cursor_copy(cursor_ptr, new_cursor); mdbx_result(res)?; Ok::<_, MdbxError>(Cursor::new_raw(access, new_cursor, db)) - })??; + })?; Ok(Some(IterDupVals::from_owned_with(dup_cursor, (key, value)))) } @@ -378,45 +368,27 @@ where } } -impl<'tx: 'cur, 'cur, K, A, Key, Value> IterDup<'tx, 'cur, K, A, Key, Value> +impl<'tx: 'cur, 'cur, K, Key, Value> IterDup<'tx, 'cur, K, Key, Value> where K: TransactionKind, - A: TxPtrAccess + 'tx, Key: TableObjectOwned, Value: TableObjectOwned, { /// Own the next key/value pair from the iterator. - pub fn owned_next(&mut self) -> ReadResult>> { + pub fn owned_next(&mut self) -> ReadResult>> { self.borrow_next() } } -impl<'tx: 'cur, 'cur, K, A, Key, Value> Iterator for IterDup<'tx, 'cur, K, A, Key, Value> +impl<'tx: 'cur, 'cur, K, Key, Value> Iterator for IterDup<'tx, 'cur, K, Key, Value> where K: TransactionKind, - A: TxPtrAccess + 'tx, Key: TableObjectOwned, Value: TableObjectOwned, { - type Item = ReadResult>; + type Item = ReadResult>; fn next(&mut self) -> Option { self.owned_next().transpose() } } - -/// A key-value iterator for a synchronized read-only transaction. 
-pub type RoIterSync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = - IterKeyVals<'tx, 'cur, crate::RO, crate::tx::PtrSyncInner, Key, Value>; - -/// A key-value iterator for a synchronized read-write transaction. -pub type RwIterSync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = - IterKeyVals<'tx, 'cur, crate::RW, crate::tx::PtrSyncInner, Key, Value>; - -/// A key-value iterator for an unsynchronized read-only transaction. -pub type RoIterUnsync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = - IterKeyVals<'tx, 'cur, crate::RO, crate::tx::RoGuard, Key, Value>; - -/// A key-value iterator for an unsynchronized read-write transaction. -pub type RwIterUnsync<'tx, 'cur, Key = Cow<'tx, [u8]>, Value = Cow<'tx, [u8]>> = - IterKeyVals<'tx, 'cur, crate::RW, crate::tx::RwUnsync, Key, Value>; diff --git a/src/tx/kind.rs b/src/tx/kind.rs index 6e82466..acb7e2e 100644 --- a/src/tx/kind.rs +++ b/src/tx/kind.rs @@ -1,54 +1,143 @@ -use crate::tx::{ - TxPtrAccess, - access::{RoGuard, RwUnsync}, +use std::{cell::RefCell, fmt::Debug, ptr, sync::Arc}; + +use crate::{ + Environment, MdbxResult, + error::mdbx_result, + tx::{ + PtrSync, TxPtrAccess, + access::PtrUnsync, + cache::{Cache, DbCache, SharedCache}, + }, }; use ffi::{MDBX_TXN_RDONLY, MDBX_TXN_READWRITE, MDBX_txn_flags_t}; mod private { pub trait Sealed {} - impl Sealed for super::RO {} - impl Sealed for super::RW {} + impl Sealed for super::Ro {} + impl Sealed for super::Rw {} + impl Sealed for super::RwSync {} + impl Sealed for super::RoSync {} } /// Marker type for read-only transactions. #[derive(Debug, Clone, Copy)] #[non_exhaustive] -pub struct RO; +pub struct Ro; /// Marker type for read-write transactions. #[derive(Debug, Clone, Copy)] #[non_exhaustive] -pub struct RW; +pub struct Rw; + +/// Marker type for synchronized read-only transactions. +#[derive(Debug, Clone, Copy)] +#[non_exhaustive] +pub struct RoSync; + +/// Marker type for synchronized read-write transactions. 
+#[derive(Debug, Clone, Copy)] +#[non_exhaustive] +pub struct RwSync; -/// Marker trait for transaction kinds with associated inner type. +/// Marker trait for transaction kinds. /// -/// The `Inner` associated type determines how the transaction pointer is -/// stored: -/// - For [`RO`]: Either `RoInner` (Arc/Weak) with feature `read-tx-timeouts`, -/// or raw pointer without it -/// - For [`RW`]: Always raw pointer (direct ownership) -pub trait TransactionKind: private::Sealed + core::fmt::Debug + 'static { +/// Composed of [`WriterKind`] and [`SyncKind`]. +pub trait TransactionKind: WriterKind + SyncKind { + /// Construct a new transaction of this kind from the given environment. + /// + /// This does NOT register RwSync transactions with the environment's + /// transaction manager; that is the caller's responsibility. #[doc(hidden)] - const OPEN_FLAGS: MDBX_txn_flags_t; + fn new_from_env(env: Environment) -> MdbxResult { + let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); + unsafe { + mdbx_result(ffi::mdbx_txn_begin_ex( + env.env_ptr(), + ptr::null_mut(), + Self::OPEN_FLAGS, + &mut txn, + ptr::null_mut(), + ))?; + } - /// Whether this is a read-only transaction. - const IS_READ_ONLY: bool; + Ok(Self::Access::from_ptr_and_env(txn, env, Self::IS_READ_ONLY)) + } + + /// Create a new tracing span for this transaction kind. + #[doc(hidden)] + fn new_span(txn_id: usize) -> tracing::Span { + tracing::debug_span!( + target: "libmdbx", + "mdbx_txn", + kind = %if Self::IS_READ_ONLY { "ro" } else { "rw" }, + sync = %if Self::SYNC { "sync" } else { "unsync" }, + txn_id = txn_id, + ) + } +} + +impl TransactionKind for T where T: WriterKind + SyncKind {} + +/// Marker trait for synchronized transaction kinds. Describes +/// both the synchronization property and associated types. +pub trait SyncKind { + /// Whether this transaction kind is synchronized (thread-safe). + const SYNC: bool = false; /// The inner storage type for the transaction pointer. 
- type Inner: TxPtrAccess; + type Access: TxPtrAccess; + + /// Cache type used for this transaction kind. + type Cache: Cache + Send; +} + +impl SyncKind for RoSync { + const SYNC: bool = true; + type Access = Arc; + type Cache = SharedCache; +} + +impl SyncKind for RwSync { + const SYNC: bool = true; + type Access = Arc; + type Cache = SharedCache; +} + +impl SyncKind for Ro { + type Access = PtrUnsync; + type Cache = RefCell; +} + +impl SyncKind for Rw { + type Access = PtrUnsync; + type Cache = RefCell; } -impl TransactionKind for RO { - const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_RDONLY; +/// Marker trait for writable transaction kinds. +/// +/// Primarily used for writing bounds of the form +/// `K: TransactionKind + WriteMarker`. +pub trait WriteMarker: private::Sealed {} + +impl WriteMarker for Rw {} +impl WriteMarker for RwSync {} + +/// Marker trait for transaction writer kinds. Either read-only or read-write. +pub trait WriterKind: private::Sealed + core::fmt::Debug + 'static { + /// Whether this transaction kind is read-only. const IS_READ_ONLY: bool = true; - // Without timeouts, RO uses direct pointer like RW - type Inner = RoGuard; + /// MDBX flags to use when opening a transaction of this kind. + const OPEN_FLAGS: MDBX_txn_flags_t = + { if Self::IS_READ_ONLY { MDBX_TXN_RDONLY } else { MDBX_TXN_READWRITE } }; } -impl TransactionKind for RW { - const OPEN_FLAGS: MDBX_txn_flags_t = MDBX_TXN_READWRITE; - const IS_READ_ONLY: bool = false; +impl WriterKind for Ro {} - type Inner = RwUnsync; +impl WriterKind for Rw { + const IS_READ_ONLY: bool = false; +} +impl WriterKind for RoSync {} +impl WriterKind for RwSync { + const IS_READ_ONLY: bool = false; } diff --git a/src/tx/lat.rs b/src/tx/lat.rs new file mode 100644 index 0000000..269a014 --- /dev/null +++ b/src/tx/lat.rs @@ -0,0 +1,79 @@ +use std::time::Duration; + +/// Commit latencies info. +/// +/// Contains information about latency of commit stages. 
+/// Inner struct stores this info in 1/65536 of seconds units. +#[derive(Debug, Clone, Copy)] +#[repr(transparent)] +pub struct CommitLatency(ffi::MDBX_commit_latency); + +impl CommitLatency { + /// Create a new `CommitLatency` with zero'd inner struct `ffi::MDBX_commit_latency`. + pub(crate) const fn new() -> Self { + unsafe { Self(std::mem::zeroed()) } + } + + /// Returns a mut pointer to `ffi::MDBX_commit_latency`. + pub(crate) const fn mdb_commit_latency(&mut self) -> *mut ffi::MDBX_commit_latency { + &mut self.0 + } +} + +impl CommitLatency { + /// Duration of preparation (commit child transactions, update + /// sub-databases records and cursors destroying). + #[inline] + pub const fn preparation(&self) -> Duration { + Self::time_to_duration(self.0.preparation) + } + + /// Duration of GC update by wall clock. + #[inline] + pub const fn gc_wallclock(&self) -> Duration { + Self::time_to_duration(self.0.gc_wallclock) + } + + /// Duration of internal audit if enabled. + #[inline] + pub const fn audit(&self) -> Duration { + Self::time_to_duration(self.0.audit) + } + + /// Duration of writing dirty/modified data pages to a filesystem, + /// i.e. the summary duration of a `write()` syscalls during commit. + #[inline] + pub const fn write(&self) -> Duration { + Self::time_to_duration(self.0.write) + } + + /// Duration of syncing written data to the disk/storage, i.e. + /// the duration of a `fdatasync()` or a `msync()` syscall during commit. + #[inline] + pub const fn sync(&self) -> Duration { + Self::time_to_duration(self.0.sync) + } + + /// Duration of transaction ending (releasing resources). + #[inline] + pub const fn ending(&self) -> Duration { + Self::time_to_duration(self.0.ending) + } + + /// The total duration of a commit. + #[inline] + pub const fn whole(&self) -> Duration { + Self::time_to_duration(self.0.whole) + } + + /// User-mode CPU time spent on GC update. 
+ #[inline] + pub const fn gc_cputime(&self) -> Duration { + Self::time_to_duration(self.0.gc_cputime) + } + + #[inline] + const fn time_to_duration(time: u32) -> Duration { + Duration::from_nanos(time as u64 * (1_000_000_000 / 65_536)) + } +} diff --git a/src/tx/mod.rs b/src/tx/mod.rs index da0e20a..45c0069 100644 --- a/src/tx/mod.rs +++ b/src/tx/mod.rs @@ -6,7 +6,7 @@ //! - [`TxUnsync`] - Single-threaded unsynchronized transaction //! - [`Cursor`] - Database cursor for navigating entries //! - [`Database`] - Handle to an opened database -//! - [`RO`], [`RW`] - Transaction kind markers +//! - [`Ro`], [`Rw`], [`RoSync`], [`RwSync`] - Transaction kind markers //! - [`CommitLatency`] - Commit timing information //! //! # Type Aliases @@ -15,22 +15,22 @@ //! - [`RoTxSync`], [`RwTxSync`] - Synchronized transactions //! - [`RoTxUnsync`], [`RwTxUnsync`] - Unsynchronized transactions //! - [`RoCursorSync`], [`RwCursorSync`] - Cursors for synchronized transactions -//! - [`RoCursorUnsync`], [`RwCursorUnsync`] - Cursors for unsynchronized transactions +//! - [`RoCursorUnsync`], [`RwCursorUnsync`] - Cursors for unsynchronized +//! transactions //! //! # Advanced: Writing Generic Code //! -//! For users writing generic code over cursors or transactions, the -//! [`TxPtrAccess`] trait is available. This trait abstracts over the different -//! ways transaction pointers are stored and accessed. -//! +//! For users writing generic code over cursors or transactions, we recommend +//! reviewing the [`TransactionKind`], [`WriterKind`], and [`SyncKind`] traits, +//! as well as exploring the bounds on impl blocks for the various transaction +//! and cursor types. 
-mod access; -pub(crate) use access::PtrSync; -pub use access::{PtrSyncInner, RoGuard, RwUnsync, TxPtrAccess}; mod assertions; -mod cache; -pub(crate) use cache::{CachedDb, SharedCache}; +mod access; +pub use access::{PtrSync, PtrUnsync, TxPtrAccess}; + +pub mod cache; mod cursor; pub use cursor::{Cursor, RoCursorSync, RoCursorUnsync, RwCursorSync, RwCursorUnsync}; @@ -42,31 +42,13 @@ pub mod iter; pub use iter::{RoIterSync, RoIterUnsync, RwIterSync, RwIterUnsync}; mod kind; -pub use kind::{RO, RW, TransactionKind}; +pub use kind::{Ro, RoSync, Rw, RwSync, SyncKind, TransactionKind, WriteMarker, WriterKind}; + +mod lat; +pub use lat::CommitLatency; /// Raw operations on transactions. pub mod ops; -mod sync; -#[allow(unused_imports)] // this is used in some features -pub use sync::{CommitLatency, TxSync}; - -pub mod unsync; -pub use unsync::TxUnsync; - -/// A synchronized read-only transaction. -pub type RoTxSync = TxSync; - -/// A synchronized read-write transaction. -pub type RwTxSync = TxSync; - -/// An unsynchronized read-only transaction. -pub type RoTxUnsync = TxUnsync; - -/// An unsynchronized read-write transaction. -pub type RwTxUnsync = TxUnsync; - -/// The default maximum duration of a read transaction. 
-#[cfg(feature = "read-tx-timeouts")] -pub const DEFAULT_MAX_READ_TRANSACTION_DURATION: std::time::Duration = - std::time::Duration::from_secs(5 * 60); +mod r#impl; +pub use r#impl::{RoTxSync, RoTxUnsync, RwTxSync, RwTxUnsync, Tx, TxSync, TxUnsync}; diff --git a/src/tx/sync.rs b/src/tx/sync.rs deleted file mode 100644 index 5df1ea5..0000000 --- a/src/tx/sync.rs +++ /dev/null @@ -1,787 +0,0 @@ -#[cfg(debug_assertions)] -use crate::tx::assertions; -use crate::{ - Cursor, Database, Environment, MdbxError, RO, RW, ReadResult, Stat, TableObject, - TransactionKind, - error::{MdbxResult, mdbx_result}, - flags::{DatabaseFlags, WriteFlags}, - sys::txn_manager::{RawTxPtr, TxnManagerMessage}, - tx::{CachedDb, PtrSync, PtrSyncInner, SharedCache, TxPtrAccess, ops}, -}; -use smallvec::SmallVec; -use std::{ - ffi::CStr, - fmt, ptr, - sync::{Arc, mpsc::sync_channel}, - time::Duration, -}; - -/// An MDBX transaction. -/// -/// All database operations require a transaction. -pub struct TxSync -where - K: TransactionKind, -{ - inner: Arc>, -} - -impl TxSync -where - K: TransactionKind, -{ - pub(crate) fn new_from_ptr(env: Environment, txn_ptr: *mut ffi::MDBX_txn) -> Self { - let txn = PtrSync::::new(env, txn_ptr); - - let inner = SyncInner { ptr: txn, db_cache: SharedCache::default() }; - - Self { inner: Arc::new(inner) } - } - - /// Executes the given closure once the lock on the transaction is acquired. - /// - /// The caller **must** ensure that the pointer is not used after the - /// lifetime of the transaction. - #[inline] - pub fn txn_execute(&self, f: F) -> MdbxResult - where - F: FnOnce(*mut ffi::MDBX_txn) -> T, - { - self.inner.txn_execute(f) - } - - /// Returns a raw pointer to the MDBX environment. - pub fn env(&self) -> &Environment { - self.inner.env() - } - - /// Returns the tracing span for this transaction. 
- /// - /// Users can enter this span to associate operations with the transaction: - /// ```ignore - /// let _guard = txn.span().enter(); - /// // operations here are within the transaction span - /// ``` - pub fn span(&self) -> &tracing::Span { - self.inner.span() - } - - /// Returns the transaction id. - pub fn id(&self) -> MdbxResult { - self.txn_execute(|txn| unsafe { ffi::mdbx_txn_id(txn) }) - } - - /// Gets an item from a database. - /// - /// This function retrieves the data associated with the given key in the - /// database. If the database supports duplicate keys - /// ([`DatabaseFlags::DUP_SORT`]) then the first data item for the key will be - /// returned. Retrieval of other items requires the use of - /// [Cursor]. If the item is not in the database, then - /// [None] will be returned. - pub fn get<'a, Key>(&'a self, dbi: ffi::MDBX_dbi, key: &[u8]) -> ReadResult> - where - Key: TableObject<'a>, - { - self.txn_execute(|txn_ptr| { - // SAFETY: - // txn is a valid transaction pointer from txn_execute. - // The decoded Cow is valid as long as the data is not dirty, and - // the tx is alive. - // The lifetime 'tx statically guarantees that the Cow cannot - // outlive the transaction. - // `decode_val` checks for dirty writes and copies data if needed. - unsafe { - let data_val = ops::get_raw(txn_ptr, dbi, key)?; - data_val.map(|val| Key::decode_val::(txn_ptr, val)).transpose() - } - })? - } - - /// Commits the transaction. - /// - /// Any pending operations will be saved. 
- pub fn commit(self) -> MdbxResult { - let _guard = self.inner.span().enter(); - - let (was_aborted, lat) = self.txn_execute(|txn| { - if K::IS_READ_ONLY { - #[cfg(feature = "read-tx-timeouts")] - self.env().txn_manager().remove_active_read_transaction(txn); - - let mut latency = CommitLatency::new(); - mdbx_result(unsafe { ffi::mdbx_txn_commit_ex(txn, latency.mdb_commit_latency()) }) - .map(|v| (v, latency)) - } else { - let (sender, rx) = sync_channel(0); - self.env() - .txn_manager() - .send_message(TxnManagerMessage::Commit { tx: RawTxPtr(txn), sender }); - rx.recv().unwrap() - } - })??; - - self.inner.ptr.mark_committed(); - - if was_aborted { - tracing::warn!(target: "libmdbx", "botched"); - return Err(MdbxError::BotchedTransaction); - } - - tracing::debug!( - target: "libmdbx", - latency_whole_ms = lat.whole().as_millis() as u64, - "committed" - ); - Ok(lat) - } - - /// Opens a handle to an MDBX database, and cache the handle for re-use. - /// - /// If `name` is `None`, then the returned handle will be for the default - /// database. - /// - /// If `name` is not `None`, then the returned handle will be for a named - /// database. In this case the environment must be configured to allow - /// named databases through - /// [`EnvironmentBuilder::set_max_dbs()`](crate::EnvironmentBuilder::set_max_dbs). - /// - /// The returned database handle MAY be shared among any transaction in the - /// environment. However, if the tx is RW and the DB is created within the - /// tx, the DB will not be visible to other transactions until the tx is - /// committed. - /// - /// The database name MAY NOT contain the null character. - pub fn open_db(&self, name: Option<&str>) -> MdbxResult { - let name_hash = CachedDb::hash_name(name); - - if let Some(db) = self.inner.db_cache.read_db(name_hash) { - return Ok(db); - } - - self.open_and_cache_with_flags(name, DatabaseFlags::empty()).map(Into::into) - } - - /// Open a DB handle without checking or writing to the cache. 
- /// - /// This may be useful when the transaction intends to open many (>20) - /// tables, as cache performance will degrade slightly with size. - pub fn open_db_no_cache(&self, name: Option<&str>) -> MdbxResult { - self.open_db_with_flags(name, DatabaseFlags::empty()).map(Into::into) - } - - /// Raw open (don't check cache) with flags. Write to cache after opening. - fn open_and_cache_with_flags( - &self, - name: Option<&str>, - flags: DatabaseFlags, - ) -> Result { - // Slow path: open via FFI and cache - let db = self.open_db_with_flags(name, flags)?; - - // Double-check pattern to avoid duplicate entries - self.inner.db_cache.write_db(db); - - Ok(db) - } - - /// Raw open (don't check cache) with flags. - /// - /// Return the name hash along with the database. - fn open_db_with_flags(&self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { - let mut c_name_buf = SmallVec::<[u8; 32]>::new(); - let c_name = name.map(|n| { - c_name_buf.extend_from_slice(n.as_bytes()); - c_name_buf.push(0); - CStr::from_bytes_with_nul(&c_name_buf).unwrap() - }); - let name_ptr = c_name.as_ref().map_or(ptr::null(), |s| s.as_ptr()); - - let (dbi, db_flags) = self.txn_execute(|txn_ptr| { - // SAFETY: txn_ptr is valid from txn_execute, name_ptr is valid or null. - unsafe { ops::open_db_raw(txn_ptr, name_ptr, flags) } - })??; - - Ok(CachedDb::new(name, Database::new(dbi, db_flags))) - } - - /// Gets the option flags for the given database in the transaction. - pub fn db_flags(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { - self.txn_execute(|txn| { - // SAFETY: txn is a valid transaction pointer from txn_execute. - unsafe { ops::db_flags_raw(txn, dbi) } - })? - } - - /// Retrieves database statistics. - pub fn db_stat(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { - self.db_stat_with_dbi(dbi) - } - - /// Retrieves database statistics by the given dbi. 
- pub fn db_stat_with_dbi(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { - self.txn_execute(|txn| { - // SAFETY: txn is a valid transaction pointer from txn_execute. - unsafe { ops::db_stat_raw(txn, dbi) } - })? - } - - /// Open a new cursor on the given database. - pub fn cursor(&self, db: Database) -> MdbxResult>> { - Cursor::new(&self.inner.ptr, db) - } - - /// Open a new cursor on the given dbi. - #[deprecated(since = "0.2.0", note = "use `cursor(&Database)` instead")] - pub fn cursor_with_dbi(&self, db: Database) -> MdbxResult>> { - Cursor::new(&self.inner.ptr, db) - } - - /// Disables a timeout for this read transaction. - #[cfg(feature = "read-tx-timeouts")] - pub fn disable_timeout(&self) { - if K::IS_READ_ONLY { - // SAFETY: Not performing any operation on the txn, just updating - // internal state. - self.env() - .txn_manager() - .remove_active_read_transaction(unsafe { self.inner.ptr.txn_ptr() }); - } - } -} - -impl Clone for TxSync -where - K: TransactionKind, -{ - fn clone(&self) -> Self { - Self { inner: Arc::clone(&self.inner) } - } -} - -impl fmt::Debug for TxSync -where - K: TransactionKind, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RoTransaction").finish_non_exhaustive() - } -} - -/// Internals of a transaction. -struct SyncInner -where - K: TransactionKind, -{ - /// The transaction pointer itself. - ptr: PtrSync, - - /// Cache of opened database handles. - db_cache: SharedCache, -} - -impl SyncInner -where - K: TransactionKind, -{ - fn env(&self) -> &Environment { - self.ptr.env() - } - - fn span(&self) -> &tracing::Span { - self.ptr.span() - } - - #[inline] - fn txn_execute(&self, f: F) -> MdbxResult - where - F: FnOnce(*mut ffi::MDBX_txn) -> T, - { - self.ptr.txn_execute_fail_on_timeout(f) - } -} - -impl Drop for SyncInner { - fn drop(&mut self) { - #[cfg(feature = "read-tx-timeouts")] - if K::IS_READ_ONLY { - // Remove from active read transactions before dropping PtrSync. 
- // This breaks the circular Arc reference: the map holds a PtrSync - // clone, so we must remove it before the final reference is dropped. - // - // SAFETY: Not performing any MDBX operation, just updating internal - // tracking state. - unsafe { - self.ptr.env().txn_manager().remove_active_read_transaction(self.ptr.txn_ptr()); - } - } - } -} - -impl TxSync { - /// Opens a handle to an MDBX database, creating the database if necessary. - /// - /// If the database is already created, the given option flags will be - /// added to it. - /// - /// If `name` is [None], then the returned handle will be for the default - /// database. - /// - /// If `name` is not [None], then the returned handle will be for a named - /// database. In this case the environment must be configured to allow - /// named databases through [`EnvironmentBuilder::set_max_dbs()`]. - /// - /// This function will fail with [`MdbxError::BadRslot`] if called by a - /// thread with an open transaction. - /// - /// [`EnvironmentBuilder::set_max_dbs()`]: crate::EnvironmentBuilder::set_max_dbs - pub fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { - self.open_db_with_flags(name, flags | DatabaseFlags::CREATE).map(Into::into) - } - - /// Stores an item into a database. - /// - /// This function stores key/data pairs in the database. The default - /// behavior is to enter the new key/data pair, replacing any previously - /// existing key if duplicates are disallowed, or adding a duplicate data - /// item if duplicates are allowed ([`DatabaseFlags::DUP_SORT`]). 
- pub fn put( - &self, - db: Database, - key: impl AsRef<[u8]>, - data: impl AsRef<[u8]>, - flags: WriteFlags, - ) -> MdbxResult<()> { - let key = key.as_ref(); - let data = data.as_ref(); - - #[cfg(debug_assertions)] - { - let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); - assertions::debug_assert_put(pagesize, db.flags(), key, data); - } - - self.txn_execute(|txn| { - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { ops::put_raw(txn, db.dbi(), key, data, flags) } - })? - } - - /// Appends a key/data pair to the end of the database. - /// - /// The key must be greater than all existing keys (or less than, for - /// [`DatabaseFlags::REVERSE_KEY`] tables). This is more efficient than - /// [`TxSync::put`] when adding data in sorted order. - /// - /// In debug builds, this method asserts that the key ordering constraint is - /// satisfied. - pub fn append( - &self, - db: Database, - key: impl AsRef<[u8]>, - data: impl AsRef<[u8]>, - ) -> MdbxResult<()> { - let key = key.as_ref(); - let data = data.as_ref(); - - self.txn_execute(|txn| { - #[cfg(debug_assertions)] - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { - ops::debug_assert_append(txn, db.dbi(), db.flags(), key, data); - } - - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { ops::put_raw(txn, db.dbi(), key, data, WriteFlags::APPEND) } - })? - } - - /// Appends duplicate data for [`DatabaseFlags::DUP_SORT`] databases. - /// - /// The data must be greater than all existing data for this key (or less - /// than, for [`DatabaseFlags::REVERSE_DUP`] tables). This is more efficient - /// than [`TxSync::put`] when adding duplicates in sorted order. - /// - /// Returns [`MdbxError::RequiresDupSort`] if the database does not have the - /// [`DatabaseFlags::DUP_SORT`] flag set. - /// - /// In debug builds, this method asserts that the data ordering constraint - /// is satisfied. 
- pub fn append_dup( - &self, - db: Database, - key: impl AsRef<[u8]>, - data: impl AsRef<[u8]>, - ) -> MdbxResult<()> { - if !db.flags().contains(DatabaseFlags::DUP_SORT) { - return Err(MdbxError::RequiresDupSort); - } - let key = key.as_ref(); - let data = data.as_ref(); - - self.txn_execute(|txn| { - #[cfg(debug_assertions)] - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { - ops::debug_assert_append_dup(txn, db.dbi(), db.flags(), key, data); - } - - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { ops::put_raw(txn, db.dbi(), key, data, WriteFlags::APPEND_DUP) } - })? - } - - /// Returns a buffer which can be used to write a value into the item at the - /// given key and with the given length. The buffer must be completely - /// filled by the caller. - /// - /// This should not be used on dupsort tables. - /// - /// # Safety - /// - /// The caller must ensure that the returned buffer is not used after the - /// transaction is committed or aborted, or if another value is inserted. - /// To be clear: the second call to this function is not permitted while - /// the returned slice is reachable. - #[allow(clippy::mut_from_ref)] - pub unsafe fn reserve( - &self, - db: Database, - key: impl AsRef<[u8]>, - len: usize, - flags: WriteFlags, - ) -> MdbxResult<&mut [u8]> { - let key = key.as_ref(); - - #[cfg(debug_assertions)] - { - let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); - assertions::debug_assert_key(pagesize, db.flags(), key); - } - - let ptr = self.txn_execute(|txn| { - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { ops::reserve_raw(txn, db.dbi(), key, len, flags) } - })??; - // SAFETY: ptr is valid from reserve_raw, len matches. - Ok(unsafe { ops::slice_from_reserved(ptr, len) }) - } - - /// Reserves space for a value of the given length at the given key, and - /// calls the given closure with a mutable slice to write into. 
- /// - /// This is a safe wrapper around [`TxSync::reserve`]. - pub fn with_reservation( - &self, - db: Database, - key: impl AsRef<[u8]>, - len: usize, - flags: WriteFlags, - f: impl FnOnce(&mut [u8]), - ) -> MdbxResult<()> { - let buf = unsafe { self.reserve(db, key, len, flags)? }; - f(buf); - Ok(()) - } - - /// Delete items from a database. - /// This function removes key/data pairs from the database. - /// - /// The data parameter is NOT ignored regardless the database does support - /// sorted duplicate data items or not. If the data parameter is [Some] - /// only the matching data item will be deleted. Otherwise, if data - /// parameter is [None], any/all value(s) for specified key will - /// be deleted. - /// - /// Returns `true` if the key/value pair was present. - pub fn del( - &self, - db: Database, - key: impl AsRef<[u8]>, - data: Option<&[u8]>, - ) -> MdbxResult { - let key = key.as_ref(); - - #[cfg(debug_assertions)] - { - let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); - assertions::debug_assert_key(pagesize, db.flags(), key); - if let Some(v) = data { - assertions::debug_assert_value(pagesize, db.flags(), v); - } - } - - self.txn_execute(|txn| { - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { ops::del_raw(txn, db.dbi(), key, data) } - })? - } - - /// Empties the given database. All items will be removed. - pub fn clear_db(&self, db: Database) -> MdbxResult<()> { - self.txn_execute(|txn| { - // SAFETY: txn is a valid RW transaction pointer from txn_execute. - unsafe { ops::clear_db_raw(txn, db.dbi()) } - })? - } - - /// Drops the database from the environment. - /// - /// # Safety - /// Caller must close ALL other [Database] and [Cursor] instances pointing - /// to the same dbi BEFORE calling this function. 
- pub unsafe fn drop_db(&self, db: Database) -> MdbxResult<()> { - self.txn_execute(|txn| { - // SAFETY: txn is a valid RW transaction pointer, caller ensures - // no other references to dbi exist. - unsafe { ops::drop_db_raw(txn, db.dbi()) } - })??; - - self.inner.db_cache.remove_dbi(db.dbi()); - - Ok(()) - } -} - -impl TxSync { - pub(crate) fn new(env: Environment) -> MdbxResult { - let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); - unsafe { - mdbx_result(ffi::mdbx_txn_begin_ex( - env.env_ptr(), - ptr::null_mut(), - RO::OPEN_FLAGS, - &mut txn, - ptr::null_mut(), - ))?; - } - let this = Self::new_from_ptr(env, txn); - - #[cfg(feature = "read-tx-timeouts")] - this.env().txn_manager().add_active_read_transaction(txn, this.inner.ptr.clone()); - - Ok(this) - } - - /// Closes the database handle. - /// - /// # Safety - /// - /// This will invalidate data cached in [`Database`] instances, and may - /// result in bad behavior when using those instances after calling this - /// function. - pub unsafe fn close_db(&self, dbi: ffi::MDBX_dbi) -> MdbxResult<()> { - // SAFETY: Caller ensures the database is not in use. - unsafe { ops::close_db_raw(self.env().env_ptr(), dbi) }?; - - self.inner.db_cache.remove_dbi(dbi); - - Ok(()) - } -} - -impl TxSync { - /// Begins a new nested transaction inside of this transaction. - pub fn begin_nested_txn(&mut self) -> MdbxResult { - if self.inner.ptr.env().is_write_map() { - return Err(MdbxError::NestedTransactionsUnsupportedWithWriteMap); - } - self.txn_execute(|txn| { - let (tx, rx) = sync_channel(0); - self.env().txn_manager().send_message(TxnManagerMessage::Begin { - parent: RawTxPtr(txn), - flags: RW::OPEN_FLAGS, - sender: tx, - }); - - rx.recv().unwrap().map(|ptr| Self::new_from_ptr(self.env().clone(), ptr.0)) - })? - } -} - -/// Commit latencies info. -/// -/// Contains information about latency of commit stages. -/// Inner struct stores this info in 1/65536 of seconds units. 
-#[derive(Debug, Clone, Copy)] -#[repr(transparent)] -pub struct CommitLatency(ffi::MDBX_commit_latency); - -impl CommitLatency { - /// Create a new `CommitLatency` with zero'd inner struct `ffi::MDBX_commit_latency`. - pub(crate) const fn new() -> Self { - unsafe { Self(std::mem::zeroed()) } - } - - /// Returns a mut pointer to `ffi::MDBX_commit_latency`. - pub(crate) const fn mdb_commit_latency(&mut self) -> *mut ffi::MDBX_commit_latency { - &mut self.0 - } -} - -impl CommitLatency { - /// Duration of preparation (commit child transactions, update - /// sub-databases records and cursors destroying). - #[inline] - pub const fn preparation(&self) -> Duration { - Self::time_to_duration(self.0.preparation) - } - - /// Duration of GC update by wall clock. - #[inline] - pub const fn gc_wallclock(&self) -> Duration { - Self::time_to_duration(self.0.gc_wallclock) - } - - /// Duration of internal audit if enabled. - #[inline] - pub const fn audit(&self) -> Duration { - Self::time_to_duration(self.0.audit) - } - - /// Duration of writing dirty/modified data pages to a filesystem, - /// i.e. the summary duration of a `write()` syscalls during commit. - #[inline] - pub const fn write(&self) -> Duration { - Self::time_to_duration(self.0.write) - } - - /// Duration of syncing written data to the disk/storage, i.e. - /// the duration of a `fdatasync()` or a `msync()` syscall during commit. - #[inline] - pub const fn sync(&self) -> Duration { - Self::time_to_duration(self.0.sync) - } - - /// Duration of transaction ending (releasing resources). - #[inline] - pub const fn ending(&self) -> Duration { - Self::time_to_duration(self.0.ending) - } - - /// The total duration of a commit. - #[inline] - pub const fn whole(&self) -> Duration { - Self::time_to_duration(self.0.whole) - } - - /// User-mode CPU time spent on GC update. 
- #[inline] - pub const fn gc_cputime(&self) -> Duration { - Self::time_to_duration(self.0.gc_cputime) - } - - #[inline] - const fn time_to_duration(time: u32) -> Duration { - Duration::from_nanos(time as u64 * (1_000_000_000 / 65_536)) - } -} - -// SAFETY: Access to the transaction is synchronized by the lock. -unsafe impl Send for PtrSync {} - -// SAFETY: Access to the transaction is synchronized by the lock. -unsafe impl Sync for PtrSync {} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::tempdir; - - const fn assert_send_sync() {} - - #[expect(dead_code)] - const fn test_txn_send_sync() { - assert_send_sync::>(); - assert_send_sync::>(); - } - - #[test] - fn test_db_cache_returns_same_db() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_ro_txn().unwrap(); - - let db1 = txn.open_db(None).unwrap(); - let db2 = txn.open_db(None).unwrap(); - - assert_eq!(db1.dbi(), db2.dbi()); - assert_eq!(db1.flags(), db2.flags()); - } - - #[test] - fn test_db_cache_no_cache_still_works() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_ro_txn().unwrap(); - - let db1 = txn.open_db_no_cache(None).unwrap(); - let db2 = txn.open_db_no_cache(None).unwrap(); - - // Same DBI should be returned by MDBX - assert_eq!(db1.dbi(), db2.dbi()); - } - - #[test] - fn test_db_cache_cached_matches_uncached() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_ro_txn().unwrap(); - - let cached = txn.open_db(None).unwrap(); - let uncached = txn.open_db_no_cache(None).unwrap(); - - assert_eq!(cached.dbi(), uncached.dbi()); - assert_eq!(cached.flags(), uncached.flags()); - } - - #[test] - fn test_db_cache_multiple_named_dbs() { - let dir = tempdir().unwrap(); - let env = Environment::builder().set_max_dbs(10).open(dir.path()).unwrap(); - - // Create named DBs - { - let txn = 
env.begin_rw_txn().unwrap(); - txn.create_db(Some("db1"), DatabaseFlags::empty()).unwrap(); - txn.create_db(Some("db2"), DatabaseFlags::empty()).unwrap(); - txn.commit().unwrap(); - } - - let txn = env.begin_ro_txn().unwrap(); - - let db1_a = txn.open_db(Some("db1")).unwrap(); - let db2_a = txn.open_db(Some("db2")).unwrap(); - let db1_b = txn.open_db(Some("db1")).unwrap(); - let db2_b = txn.open_db(Some("db2")).unwrap(); - - // Same named DB returns same handle - assert_eq!(db1_a.dbi(), db1_b.dbi()); - assert_eq!(db2_a.dbi(), db2_b.dbi()); - - // Different DBs have different handles - assert_ne!(db1_a.dbi(), db2_a.dbi()); - } - - #[test] - fn test_db_cache_flags_preserved() { - let dir = tempdir().unwrap(); - let env = Environment::builder().set_max_dbs(10).open(dir.path()).unwrap(); - - // Create DB with specific flags - { - let txn = env.begin_rw_txn().unwrap(); - txn.create_db(Some("dupsort"), DatabaseFlags::DUP_SORT).unwrap(); - txn.commit().unwrap(); - } - - let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(Some("dupsort")).unwrap(); - - assert!(db.flags().contains(DatabaseFlags::DUP_SORT)); - - // Second open should have same flags from cache - let db2 = txn.open_db(Some("dupsort")).unwrap(); - assert!(db2.flags().contains(DatabaseFlags::DUP_SORT)); - } -} diff --git a/src/tx/unsync.rs b/src/tx/unsync.rs deleted file mode 100644 index c411b41..0000000 --- a/src/tx/unsync.rs +++ /dev/null @@ -1,588 +0,0 @@ -//! Alternative transaction implementation that provide unsynchronized -//! access for read-write transactions. -//! -//! These transactions are significantly faster than synchronized transactions -//! when used in a single-threaded context, as they avoid the overhead of -//! mutexes locking. However, they are `!Sync` due to MDBX's thread -//! requirements. The RW transactions are `!Send` and must be used on the -//! creating thread, while RO transactions can be sent between threads but not -//! shared concurrently. -//! -//! 
| Transaction Type | Send | Sync | MDBX Requirement | -//! |------------------|------|------|-------------------------------| -//! | Read-Only (RO) | Yes | No | Total ordering of access | -//! | Read-Write (RW) | No | No | Single-threaded ownership | -//! -//! # Design -//! -//! - **RO transactions**: Use Arc/Weak pattern for timeout support. When a -//! timeout is set, a background thread holds the Arc and drops it on timeout, -//! causing the transaction to be aborted. The transaction holds a Weak -//! reference and upgrades it for each operation. -//! -//! - **RW transactions**: Use direct pointer ownership with `!Send` to ensure -//! they stay on the creating thread. No mutex needed since RW transactions -//! are single-threaded. - -#[cfg(debug_assertions)] -use crate::tx::assertions; -use crate::{ - CommitLatency, Database, Environment, MdbxError, RO, RW, ReadResult, Stat, TableObject, - TransactionKind, - error::{MdbxResult, mdbx_result}, - flags::{DatabaseFlags, WriteFlags}, - tx::{ - Cursor, RoGuard, RwUnsync, TxPtrAccess, - access::RoTxPtr, - cache::{CachedDb, DbCache}, - ops, - }, -}; -use smallvec::SmallVec; -use std::{ffi::CStr, fmt, marker::PhantomData, ptr}; - -/// Meta-data for a transaction. -pub struct TxMeta { - env: Environment, - db_cache: DbCache, - span: tracing::Span, -} - -impl fmt::Debug for TxMeta { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("TxMeta").finish() - } -} - -/// A database transaction (v2 implementation). -/// -/// This implementation uses: -/// - Arc/Weak pattern for RO transactions (!Sync, with timeout support) -/// - Direct ownership for RW transactions (!Send, !Sync, no mutex needed) -pub struct TxUnsync { - txn: K::Inner, - - meta: TxMeta, - - _marker: PhantomData, -} - -impl TxUnsync { - fn new_inner(env: Environment) -> MdbxResult<(*mut ffi::MDBX_txn, TxMeta)> { - let mut txn: *mut ffi::MDBX_txn = ptr::null_mut(); - // SAFETY: env.env_ptr() is valid, we check the result. 
- unsafe { - mdbx_result(ffi::mdbx_txn_begin_ex( - env.env_ptr(), - ptr::null_mut(), - K::OPEN_FLAGS, - &mut txn, - ptr::null_mut(), - ))?; - } - - let txn_id = unsafe { ffi::mdbx_txn_id(txn) }; - let span = tracing::debug_span!( - target: "libmdbx", - "mdbx_txn_v2", - kind = if K::IS_READ_ONLY { "ro" } else { "rw" }, - txn_id, - committed = false, - ); - - Ok((txn, TxMeta { env, db_cache: DbCache::default(), span })) - } -} - -impl TxUnsync { - /// Creates the raw pointer and metadata from an environment. - fn begin(env: Environment) -> MdbxResult<(RoTxPtr, TxMeta)> { - let (txn, meta) = Self::new_inner(env)?; - Ok((RoTxPtr::from(txn), meta)) - } - - /// Completes construction from a guard and metadata. - const fn from_guard(guard: RoGuard, meta: TxMeta) -> Self { - Self { txn: guard, meta, _marker: PhantomData } - } - - /// Creates a new read-only transaction. - #[cfg(not(feature = "read-tx-timeouts"))] - pub(crate) fn new(env: Environment) -> MdbxResult { - let (ptr, meta) = Self::begin(env)?; - Ok(Self::from_guard(RoGuard::new_no_timeout(ptr), meta)) - } - - /// Creates a new read-only transaction with the default timeout. - #[cfg(feature = "read-tx-timeouts")] - pub(crate) fn new(env: Environment) -> MdbxResult { - use crate::tx::DEFAULT_MAX_READ_TRANSACTION_DURATION; - Self::new_with_timeout(env, DEFAULT_MAX_READ_TRANSACTION_DURATION) - } - - /// Creates a new read-only transaction without a timeout. - #[cfg(feature = "read-tx-timeouts")] - pub(crate) fn new_no_timeout(env: Environment) -> MdbxResult { - let (ptr, meta) = Self::begin(env)?; - Ok(Self::from_guard(RoGuard::new_no_timeout(ptr), meta)) - } - - /// Creates a new read-only transaction with a custom timeout. 
- #[cfg(feature = "read-tx-timeouts")] - pub(crate) fn new_with_timeout( - env: Environment, - duration: std::time::Duration, - ) -> MdbxResult { - let (ptr, meta) = Self::begin(env)?; - Ok(Self::from_guard(RoGuard::new_with_timeout(ptr, duration), meta)) - } - - /// Tries to disable the timeout timer for this transaction. - #[cfg(feature = "read-tx-timeouts")] - pub fn try_disable_timer(&mut self) -> MdbxResult<()> { - self.txn.try_disable_timer() - } -} - -impl TxUnsync { - /// Creates a new read-write transaction. - pub(crate) fn new(env: Environment) -> MdbxResult { - let (txn, meta) = Self::new_inner(env)?; - - let ptr = RwUnsync::new(txn); - - Ok(Self { txn: ptr, meta, _marker: PhantomData }) - } -} - -impl TxUnsync { - /// Gets the raw transaction pointer - /// - /// This transaction takes &mut self to ensure exclusive access. This - /// ensures that accesses are serialized by definition, without needing a - /// mutex or other synchronization primitive. - #[inline(always)] - fn with_txn_ptr(&mut self, f: F) -> MdbxResult - where - F: FnOnce(*mut ffi::MDBX_txn) -> R, - { - self.txn.with_txn_ptr(f) - } - - /// Returns a reference to the environment. - pub const fn env(&self) -> &Environment { - &self.meta.env - } - - /// Returns the tracing span for this transaction. - pub const fn span(&self) -> &tracing::Span { - &self.meta.span - } - - /// Returns the transaction id. - pub fn id(&mut self) -> MdbxResult { - self.with_txn_ptr(|txn_ptr| Ok(unsafe { ffi::mdbx_txn_id(txn_ptr) }))? - } - - /// Gets an item from a database. - pub fn get<'a, Key>(&'a mut self, dbi: ffi::MDBX_dbi, key: &[u8]) -> ReadResult> - where - Key: TableObject<'a>, - { - self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { - let data_val = ops::get_raw(txn_ptr, dbi, key)?; - data_val.map(|val| Key::decode_val::(txn_ptr, val)).transpose() - } - })? - } - - /// Opens a handle to an MDBX database. 
- pub fn open_db(&mut self, name: Option<&str>) -> MdbxResult { - let name_hash = CachedDb::hash_name(name); - - if let Some(db) = self.meta.db_cache.read_db(name_hash) { - return Ok(db); - } - - self.open_and_cache_with_flags(name, DatabaseFlags::empty()).map(Into::into) - } - - /// Opens a database handle without using the cache. - pub fn open_db_no_cache(&mut self, name: Option<&str>) -> MdbxResult { - self.open_db_with_flags(name, DatabaseFlags::empty()).map(Into::into) - } - - fn open_and_cache_with_flags( - &mut self, - name: Option<&str>, - flags: DatabaseFlags, - ) -> MdbxResult { - let db = self.open_db_with_flags(name, flags)?; - self.meta.db_cache.write_db(db); - Ok(db) - } - - fn open_db_with_flags( - &mut self, - name: Option<&str>, - flags: DatabaseFlags, - ) -> MdbxResult { - let mut c_name_buf = SmallVec::<[u8; 32]>::new(); - let c_name = name.map(|n| { - c_name_buf.extend_from_slice(n.as_bytes()); - c_name_buf.push(0); - CStr::from_bytes_with_nul(&c_name_buf).unwrap() - }); - let name_ptr = c_name.as_ref().map_or(ptr::null(), |s| s.as_ptr()); - - let (dbi, db_flags) = self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr, name_ptr is valid or null. - unsafe { ops::open_db_raw(txn_ptr, name_ptr, flags) } - })??; - - Ok(CachedDb::new(name, Database::new(dbi, db_flags))) - } - - /// Gets the option flags for the given database. - pub fn db_flags(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult { - self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::db_flags_raw(txn_ptr, dbi) } - })? - } - - /// Retrieves database statistics. - pub fn db_stat(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult { - self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::db_stat_raw(txn_ptr, dbi) } - })? - } - - /// Opens a cursor on the given database. - /// - /// Multiple cursors can be open simultaneously on different databases - /// within the same transaction. 
The cursor borrows the transaction's - /// inner access type, allowing concurrent cursor operations. - pub fn cursor(&self, db: Database) -> MdbxResult> { - Cursor::new(&self.txn, db) - } - - /// Commits the transaction (inner implementation). - fn commit_inner(mut self, latency: *mut ffi::MDBX_commit_latency) -> MdbxResult<()> { - // Self is dropped at end of function, so RwTxPtr::drop will be within - // span scope. - let _guard = self.meta.span.clone().entered(); - - // SAFETY: txn_ptr is valid from with_txn_ptr. - let was_aborted = - self.with_txn_ptr(|txn_ptr| unsafe { ops::commit_raw(txn_ptr, latency) })??; - - self.txn.mark_committed(); - - if was_aborted { - tracing::warn!(target: "libmdbx", "botched"); - return Err(MdbxError::BotchedTransaction); - } - - Ok(()) - } - - /// Commit the transaction. - /// - /// For RO transactions, this will release resources held by the - /// transaction. For RW transactions, this will persist changes to the - /// database. - pub fn commit(self) -> MdbxResult<()> { - // SAFETY: txn_ptr is valid. - self.commit_inner(std::ptr::null_mut()) - } - - /// Commits the transaction and returns commit latency information. - /// - /// For RO transactions, this will release resources held by the - /// transaction. For RW transactions, this will persist changes to the - /// database. - pub fn commit_with_latency(self) -> MdbxResult { - let mut latency = CommitLatency::new(); - self.commit_inner(latency.mdb_commit_latency())?; - Ok(latency) - } -} - -impl TxUnsync { - /// Creates a database if it doesn't exist. - pub fn create_db(&mut self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { - self.open_db_with_flags(name, flags | DatabaseFlags::CREATE).map(Into::into) - } - - /// Stores an item into a database. 
- pub fn put( - &mut self, - db: Database, - key: impl AsRef<[u8]>, - data: impl AsRef<[u8]>, - flags: WriteFlags, - ) -> MdbxResult<()> { - let key = key.as_ref(); - let data = data.as_ref(); - - #[cfg(debug_assertions)] - { - let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); - assertions::debug_assert_put(pagesize, db.flags(), key, data); - } - - self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::put_raw(txn_ptr, db.dbi(), key, data, flags) } - })? - } - - /// Appends a key/data pair to the end of the database. - /// - /// The key must be greater than all existing keys (or less than, for - /// [`DatabaseFlags::REVERSE_KEY`] tables). This is more efficient than - /// [`TxUnsync::put`] when adding data in sorted order. - /// - /// In debug builds, this method asserts that the key ordering constraint is - /// satisfied. - pub fn append( - &mut self, - db: Database, - key: impl AsRef<[u8]>, - data: impl AsRef<[u8]>, - ) -> MdbxResult<()> { - let key = key.as_ref(); - let data = data.as_ref(); - - self.with_txn_ptr(|txn_ptr| { - #[cfg(debug_assertions)] - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { - ops::debug_assert_append(txn_ptr, db.dbi(), db.flags(), key, data); - } - - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::put_raw(txn_ptr, db.dbi(), key, data, WriteFlags::APPEND) } - })? - } - - /// Appends duplicate data for [`DatabaseFlags::DUP_SORT`] databases. - /// - /// The data must be greater than all existing data for this key (or less - /// than, for [`DatabaseFlags::REVERSE_DUP`] tables). This is more efficient - /// than [`TxUnsync::put`] when adding duplicates in sorted order. - /// - /// Returns [`MdbxError::RequiresDupSort`] if the database does not have the - /// [`DatabaseFlags::DUP_SORT`] flag set. - /// - /// In debug builds, this method asserts that the data ordering constraint - /// is satisfied. 
- pub fn append_dup( - &mut self, - db: Database, - key: impl AsRef<[u8]>, - data: impl AsRef<[u8]>, - ) -> MdbxResult<()> { - if !db.flags().contains(DatabaseFlags::DUP_SORT) { - return Err(MdbxError::RequiresDupSort); - } - let key = key.as_ref(); - let data = data.as_ref(); - - self.with_txn_ptr(|txn_ptr| { - #[cfg(debug_assertions)] - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { - ops::debug_assert_append_dup(txn_ptr, db.dbi(), db.flags(), key, data); - } - - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::put_raw(txn_ptr, db.dbi(), key, data, WriteFlags::APPEND_DUP) } - })? - } - - /// Reserves space for a value and returns a mutable slice to write into. - /// - /// # Safety - /// - /// The returned buffer is only valid until another value is inserted or - /// the transaction is committed/aborted. - #[allow(clippy::mut_from_ref)] - pub unsafe fn reserve( - &mut self, - db: Database, - key: impl AsRef<[u8]>, - len: usize, - flags: WriteFlags, - ) -> MdbxResult<&mut [u8]> { - let key = key.as_ref(); - - #[cfg(debug_assertions)] - { - let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); - assertions::debug_assert_key(pagesize, db.flags(), key); - } - - let ptr = self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::reserve_raw(txn_ptr, db.dbi(), key, len, flags) } - })??; - // SAFETY: ptr is valid from reserve_raw, len matches. - Ok(unsafe { ops::slice_from_reserved(ptr, len) }) - } - - /// Reserves space and calls the closure to write into it. - pub fn with_reservation( - &mut self, - db: Database, - key: impl AsRef<[u8]>, - len: usize, - flags: WriteFlags, - f: impl FnOnce(&mut [u8]), - ) -> MdbxResult<()> { - // SAFETY: We ensure the buffer is written to before any other operation. - let buf = unsafe { self.reserve(db, key, len, flags)? }; - f(buf); - Ok(()) - } - - /// Deletes items from a database. 
- pub fn del( - &mut self, - db: Database, - key: impl AsRef<[u8]>, - data: Option<&[u8]>, - ) -> MdbxResult { - let key = key.as_ref(); - - #[cfg(debug_assertions)] - { - let pagesize = self.env().stat().map(|s| s.page_size() as usize).unwrap_or(4096); - assertions::debug_assert_key(pagesize, db.flags(), key); - if let Some(v) = data { - assertions::debug_assert_value(pagesize, db.flags(), v); - } - } - - self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::del_raw(txn_ptr, db.dbi(), key, data) } - })? - } - - /// Empties the given database. - pub fn clear_db(&mut self, db: Database) -> MdbxResult<()> { - self.with_txn_ptr(|txn_ptr| { - // SAFETY: txn_ptr is valid from with_txn_ptr. - unsafe { ops::clear_db_raw(txn_ptr, db.dbi()) } - })? - } - - /// Drops the database from the environment. - /// - /// # Safety - /// - /// Caller must ensure all other Database and Cursor instances pointing - /// to this dbi are closed before calling. - pub unsafe fn drop_db(&mut self, db: Database) -> MdbxResult<()> { - self.with_txn_ptr(|txn_ptr| { - // SAFETY: Caller ensures no other references exist. - unsafe { ops::drop_db_raw(txn_ptr, db.dbi()) } - })??; - self.meta.db_cache.remove_dbi(db.dbi()); - Ok(()) - } -} - -impl TxUnsync { - /// Closes the database handle. - /// - /// # Safety - /// - /// This will invalidate data cached in [`Database`] instances, and may - /// result in bad behavior when using those instances after calling this - /// function. - pub unsafe fn close_db(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult<()> { - // SAFETY: Caller ensures no other references exist. 
- unsafe { ops::close_db_raw(self.meta.env.env_ptr(), dbi) }?; - self.meta.db_cache.remove_dbi(dbi); - Ok(()) - } -} - -impl fmt::Debug for TxUnsync { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Transaction").finish_non_exhaustive() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::tempdir; - - #[test] - fn test_basic_rw_operations() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - - // Write data - let mut txn = TxUnsync::::new(env.clone()).unwrap(); - let db = txn.create_db(None, DatabaseFlags::empty()).unwrap(); - txn.put(db, b"key1", b"value1", WriteFlags::empty()).unwrap(); - txn.put(db, b"key2", b"value2", WriteFlags::empty()).unwrap(); - txn.commit().unwrap(); - - // Read data - let mut txn = TxUnsync::::new(env.clone()).unwrap(); - - let db = txn.open_db(None).unwrap(); - let value: Option> = txn.get(db.dbi(), b"key1").unwrap(); - assert_eq!(value.as_deref(), Some(b"value1".as_slice())); - - let value: Option> = txn.get(db.dbi(), b"key2").unwrap(); - assert_eq!(value.as_deref(), Some(b"value2".as_slice())); - - let value: Option> = txn.get(db.dbi(), b"nonexistent").unwrap(); - assert!(value.is_none()); - } - - #[test] - fn test_db_cache() { - let dir = tempdir().unwrap(); - let env = Environment::builder().set_max_dbs(10).open(dir.path()).unwrap(); - - // Create named DBs - { - let mut txn = TxUnsync::::new(env.clone()).unwrap(); - txn.create_db(Some("db1"), DatabaseFlags::empty()).unwrap(); - txn.create_db(Some("db2"), DatabaseFlags::empty()).unwrap(); - txn.commit().unwrap(); - } - - let mut txn = TxUnsync::::new(env.clone()).unwrap(); - - let db1_a = txn.open_db(Some("db1")).unwrap(); - let db1_b = txn.open_db(Some("db1")).unwrap(); - let db2 = txn.open_db(Some("db2")).unwrap(); - - assert_eq!(db1_a.dbi(), db1_b.dbi()); - assert_ne!(db1_a.dbi(), db2.dbi()); - } - - #[test] - #[cfg(feature = "read-tx-timeouts")] - fn test_ro_transaction_no_timeout() { - 
let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - - let mut txn = TxUnsync::::new_no_timeout(env).unwrap(); - let db = txn.open_db(None).unwrap(); - let value: Option> = txn.get(db.dbi(), b"missing").unwrap(); - assert!(value.is_none()); - } -} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 78648f6..97acf7b 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -5,94 +5,93 @@ //! functions that work with either variant. #![allow(missing_docs, dead_code)] use signet_libmdbx::{ - Cursor, Database, DatabaseFlags, Environment, MdbxResult, RO, RW, ReadResult, Stat, - TableObject, TxSync, WriteFlags, ffi, - tx::{PtrSyncInner, RoGuard, RwUnsync, TxPtrAccess, unsync}, + Cursor, Database, DatabaseFlags, Environment, MdbxResult, ReadResult, Ro, RoSync, Rw, RwSync, + Stat, TableObject, TransactionKind, TxSync, TxUnsync, WriteFlags, ffi, + tx::{RoTxSync, RoTxUnsync, RwTxSync, RwTxUnsync, WriteMarker}, }; /// Trait for read-write transaction operations used in tests. pub trait TestRwTxn: Sized { - /// The cursor access type for this transaction. 
- type CursorAccess: TxPtrAccess; + /// The kind + type Kind: TransactionKind + WriteMarker; - fn create_db(&mut self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult; - fn open_db(&mut self, name: Option<&str>) -> MdbxResult; + fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult; + fn open_db(&self, name: Option<&str>) -> MdbxResult; fn get<'a, T: TableObject<'a>>( - &'a mut self, + &'a self, dbi: ffi::MDBX_dbi, key: &[u8], ) -> ReadResult>; - fn put(&mut self, db: Database, key: &[u8], data: &[u8], flags: WriteFlags) -> MdbxResult<()>; - fn append(&mut self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()>; - fn append_dup(&mut self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()>; - fn del(&mut self, db: Database, key: &[u8], data: Option<&[u8]>) -> MdbxResult; - fn clear_db(&mut self, db: Database) -> MdbxResult<()>; + fn put(&self, db: Database, key: &[u8], data: &[u8], flags: WriteFlags) -> MdbxResult<()>; + fn append(&self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()>; + fn append_dup(&self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()>; + fn del(&self, db: Database, key: &[u8], data: Option<&[u8]>) -> MdbxResult; + fn clear_db(&self, db: Database) -> MdbxResult<()>; fn commit(self) -> MdbxResult<()>; - fn cursor(&self, db: Database) -> MdbxResult>; - fn db_stat(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult; + fn cursor(&self, db: Database) -> MdbxResult>; + fn db_stat(&self, dbi: ffi::MDBX_dbi) -> MdbxResult; - /// # Safety + /// # Safety_by_dbi /// Caller must close all other Database and Cursor instances pointing to /// this dbi before calling. - unsafe fn drop_db(&mut self, db: Database) -> MdbxResult<()>; + unsafe fn drop_db(&self, db: Database) -> MdbxResult<()>; } /// Trait for read-only transaction operations used in tests. pub trait TestRoTxn: Sized { - /// The cursor access type for this transaction. 
- type CursorAccess: TxPtrAccess; + type Kind: TransactionKind; - fn open_db(&mut self, name: Option<&str>) -> MdbxResult; + fn open_db(&self, name: Option<&str>) -> MdbxResult; fn get<'a, T: TableObject<'a>>( - &'a mut self, + &'a self, dbi: ffi::MDBX_dbi, key: &[u8], ) -> ReadResult>; fn commit(self) -> MdbxResult<()>; - fn cursor(&self, db: Database) -> MdbxResult>; - fn db_stat(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult; + fn cursor(&self, db: Database) -> MdbxResult>; + fn db_stat(&self, dbi: ffi::MDBX_dbi) -> MdbxResult; } -// ============================================================================= +// =============================================================================_by_dbi // V1 Transaction implementations // ============================================================================= -impl TestRwTxn for TxSync { - type CursorAccess = PtrSyncInner; +impl TestRwTxn for RwTxSync { + type Kind = RwSync; - fn create_db(&mut self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { + fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { TxSync::create_db(self, name, flags) } - fn open_db(&mut self, name: Option<&str>) -> MdbxResult { + fn open_db(&self, name: Option<&str>) -> MdbxResult { TxSync::open_db(self, name) } fn get<'a, T: TableObject<'a>>( - &'a mut self, + &'a self, dbi: ffi::MDBX_dbi, key: &[u8], ) -> ReadResult> { TxSync::get(self, dbi, key) } - fn put(&mut self, db: Database, key: &[u8], data: &[u8], flags: WriteFlags) -> MdbxResult<()> { + fn put(&self, db: Database, key: &[u8], data: &[u8], flags: WriteFlags) -> MdbxResult<()> { TxSync::put(self, db, key, data, flags) } - fn append(&mut self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()> { + fn append(&self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()> { TxSync::append(self, db, key, data) } - fn append_dup(&mut self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()> { + fn append_dup(&self, db: Database, key: &[u8], 
data: &[u8]) -> MdbxResult<()> { TxSync::append_dup(self, db, key, data) } - fn del(&mut self, db: Database, key: &[u8], data: Option<&[u8]>) -> MdbxResult { + fn del(&self, db: Database, key: &[u8], data: Option<&[u8]>) -> MdbxResult { TxSync::del(self, db, key, data) } - fn clear_db(&mut self, db: Database) -> MdbxResult<()> { + fn clear_db(&self, db: Database) -> MdbxResult<()> { TxSync::clear_db(self, db) } @@ -100,29 +99,29 @@ impl TestRwTxn for TxSync { TxSync::commit(self).map(|_| ()) } - fn cursor(&self, db: Database) -> MdbxResult> { + fn cursor(&self, db: Database) -> MdbxResult> { TxSync::cursor(self, db) } - fn db_stat(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult { - TxSync::db_stat(self, dbi) + fn db_stat(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { + TxSync::db_stat_by_dbi(self, dbi) } - unsafe fn drop_db(&mut self, db: Database) -> MdbxResult<()> { + unsafe fn drop_db(&self, db: Database) -> MdbxResult<()> { // SAFETY: Caller ensures no other references to dbi exist. unsafe { TxSync::drop_db(self, db) } } } -impl TestRoTxn for TxSync { - type CursorAccess = PtrSyncInner; +impl TestRoTxn for RoTxSync { + type Kind = RoSync; - fn open_db(&mut self, name: Option<&str>) -> MdbxResult { + fn open_db(&self, name: Option<&str>) -> MdbxResult { TxSync::open_db(self, name) } fn get<'a, T: TableObject<'a>>( - &'a mut self, + &'a self, dbi: ffi::MDBX_dbi, key: &[u8], ) -> ReadResult> { @@ -133,12 +132,12 @@ impl TestRoTxn for TxSync { TxSync::commit(self).map(|_| ()) } - fn cursor(&self, db: Database) -> MdbxResult> { + fn cursor(&self, db: Database) -> MdbxResult> { TxSync::cursor(self, db) } - fn db_stat(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult { - TxSync::db_stat(self, dbi) + fn db_stat(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { + TxSync::db_stat_by_dbi(self, dbi) } } @@ -146,88 +145,88 @@ impl TestRoTxn for TxSync { // V2 Transaction implementations // ============================================================================= -impl TestRwTxn for 
unsync::TxUnsync { - type CursorAccess = RwUnsync; +impl TestRwTxn for RwTxUnsync { + type Kind = Rw; - fn create_db(&mut self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { - unsync::TxUnsync::create_db(self, name, flags) + fn create_db(&self, name: Option<&str>, flags: DatabaseFlags) -> MdbxResult { + TxUnsync::create_db(self, name, flags) } - fn open_db(&mut self, name: Option<&str>) -> MdbxResult { - unsync::TxUnsync::open_db(self, name) + fn open_db(&self, name: Option<&str>) -> MdbxResult { + TxUnsync::open_db(self, name) } fn get<'a, T: TableObject<'a>>( - &'a mut self, + &'a self, dbi: ffi::MDBX_dbi, key: &[u8], ) -> ReadResult> { - unsync::TxUnsync::get(self, dbi, key) + TxUnsync::get(self, dbi, key) } - fn put(&mut self, db: Database, key: &[u8], data: &[u8], flags: WriteFlags) -> MdbxResult<()> { - unsync::TxUnsync::put(self, db, key, data, flags) + fn put(&self, db: Database, key: &[u8], data: &[u8], flags: WriteFlags) -> MdbxResult<()> { + TxUnsync::put(self, db, key, data, flags) } - fn append(&mut self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()> { - unsync::TxUnsync::append(self, db, key, data) + fn append(&self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()> { + TxUnsync::append(self, db, key, data) } - fn append_dup(&mut self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()> { - unsync::TxUnsync::append_dup(self, db, key, data) + fn append_dup(&self, db: Database, key: &[u8], data: &[u8]) -> MdbxResult<()> { + TxUnsync::append_dup(self, db, key, data) } - fn del(&mut self, db: Database, key: &[u8], data: Option<&[u8]>) -> MdbxResult { - unsync::TxUnsync::del(self, db, key, data) + fn del(&self, db: Database, key: &[u8], data: Option<&[u8]>) -> MdbxResult { + TxUnsync::del(self, db, key, data) } - fn clear_db(&mut self, db: Database) -> MdbxResult<()> { - unsync::TxUnsync::clear_db(self, db) + fn clear_db(&self, db: Database) -> MdbxResult<()> { + TxUnsync::clear_db(self, db) } fn commit(self) -> 
MdbxResult<()> { - unsync::TxUnsync::commit(self) + TxUnsync::commit(self) } - fn cursor(&self, db: Database) -> MdbxResult> { - unsync::TxUnsync::cursor(self, db) + fn cursor(&self, db: Database) -> MdbxResult> { + TxUnsync::cursor(self, db) } - fn db_stat(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult { - unsync::TxUnsync::db_stat(self, dbi) + fn db_stat(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { + TxUnsync::db_stat_by_dbi(self, dbi) } - unsafe fn drop_db(&mut self, db: Database) -> MdbxResult<()> { + unsafe fn drop_db(&self, db: Database) -> MdbxResult<()> { // SAFETY: Caller ensures no other references to dbi exist. - unsafe { unsync::TxUnsync::drop_db(self, db) } + unsafe { TxUnsync::drop_db(self, db) } } } -impl TestRoTxn for unsync::TxUnsync { - type CursorAccess = RoGuard; +impl TestRoTxn for TxUnsync { + type Kind = Ro; - fn open_db(&mut self, name: Option<&str>) -> MdbxResult { - unsync::TxUnsync::open_db(self, name) + fn open_db(&self, name: Option<&str>) -> MdbxResult { + TxUnsync::open_db(self, name) } fn get<'a, T: TableObject<'a>>( - &'a mut self, + &'a self, dbi: ffi::MDBX_dbi, key: &[u8], ) -> ReadResult> { - unsync::TxUnsync::get(self, dbi, key) + TxUnsync::get(self, dbi, key) } fn commit(self) -> MdbxResult<()> { - unsync::TxUnsync::commit(self) + TxUnsync::commit(self) } - fn cursor(&self, db: Database) -> MdbxResult> { - unsync::TxUnsync::cursor(self, db) + fn cursor(&self, db: Database) -> MdbxResult> { + TxUnsync::cursor(self, db) } - fn db_stat(&mut self, dbi: ffi::MDBX_dbi) -> MdbxResult { - unsync::TxUnsync::db_stat(self, dbi) + fn db_stat(&self, dbi: ffi::MDBX_dbi) -> MdbxResult { + TxUnsync::db_stat_by_dbi(self, dbi) } } @@ -239,12 +238,12 @@ impl TestRoTxn for unsync::TxUnsync { pub struct V1Factory; impl V1Factory { - pub fn begin_rw(env: &Environment) -> MdbxResult> { - env.begin_rw_txn() + pub fn begin_rw(env: &Environment) -> MdbxResult { + env.begin_rw_sync() } - pub fn begin_ro(env: &Environment) -> MdbxResult> { - 
env.begin_ro_txn() + pub fn begin_ro(env: &Environment) -> MdbxResult { + env.begin_ro_sync() } } @@ -252,11 +251,11 @@ impl V1Factory { pub struct V2Factory; impl V2Factory { - pub fn begin_rw(env: &Environment) -> MdbxResult> { + pub fn begin_rw(env: &Environment) -> MdbxResult { env.begin_rw_unsync() } - pub fn begin_ro(env: &Environment) -> MdbxResult> { + pub fn begin_ro(env: &Environment) -> MdbxResult { env.begin_ro_unsync() } } diff --git a/tests/cursor.rs b/tests/cursor.rs index f7beedd..291e5d3 100644 --- a/tests/cursor.rs +++ b/tests/cursor.rs @@ -2,8 +2,8 @@ mod common; use common::{TestRoTxn, TestRwTxn, V1Factory, V2Factory}; use signet_libmdbx::{ - DatabaseFlags, Environment, MdbxError, MdbxResult, ObjectLength, ReadError, ReadResult, - WriteFlags, tx::TxPtrAccess, + Cursor, DatabaseFlags, Environment, MdbxError, MdbxResult, ObjectLength, ReadError, ReadResult, + TransactionKind, WriteFlags, }; use std::{borrow::Cow, hint::black_box}; use tempfile::tempdir; @@ -25,7 +25,7 @@ fn test_get_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); assert_eq!(None, txn.cursor(db).unwrap().first::<(), ()>().unwrap()); @@ -65,7 +65,7 @@ fn test_get_dup_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); @@ -124,7 +124,7 @@ fn test_get_dupfixed_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED).unwrap(); 
txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); @@ -167,7 +167,7 @@ fn test_iter_impl( ]; { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); for (key, data) in &items { txn.put(db, key, data, WriteFlags::empty()).unwrap(); @@ -175,7 +175,7 @@ fn test_iter_impl( txn.commit().unwrap(); } - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -231,7 +231,7 @@ fn test_iter_empty_database_impl( { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -260,11 +260,11 @@ fn test_iter_empty_dup_database_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.commit().unwrap(); - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -298,7 +298,7 @@ fn test_iter_dup_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.commit().unwrap(); @@ -321,7 +321,7 @@ fn test_iter_dup_impl( .collect(); { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); for (key, data) in items.clone() { let db = txn.open_db(None).unwrap(); txn.put(db, &key, &data, WriteFlags::empty()).unwrap(); @@ -329,7 +329,7 @@ fn test_iter_dup_impl( txn.commit().unwrap(); } - let mut txn = 
begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); assert_eq!(items, cursor.iter_dup().flatten().flatten().collect::>>().unwrap()); @@ -419,7 +419,7 @@ fn test_iter_del_get_impl( let items = vec![(*b"a", *b"1"), (*b"b", *b"2")]; { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); assert_eq!( txn.cursor(db) @@ -435,7 +435,7 @@ fn test_iter_del_get_impl( } { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); for (key, data) in &items { txn.put(db, key, data, WriteFlags::empty()).unwrap(); @@ -443,7 +443,7 @@ fn test_iter_del_get_impl( txn.commit().unwrap(); } - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); assert_eq!( @@ -491,7 +491,7 @@ fn test_put_del_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -539,7 +539,7 @@ fn test_dup_sort_validation_on_non_dupsort_db_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); // Non-DUPSORT database txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); @@ -586,7 +586,7 @@ fn test_dup_fixed_validation_on_non_dupfixed_db_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); // Create DUPSORT but NOT DUPFIXED database let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, 
b"key1", b"val1", WriteFlags::empty()).unwrap(); @@ -625,7 +625,7 @@ fn test_dup_sort_methods_work_on_dupsort_db_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); @@ -662,7 +662,7 @@ fn test_dup_fixed_methods_work_on_dupfixed_db_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); @@ -697,14 +697,14 @@ fn test_iter_exhausted_cursor_repositions_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); for i in 0u8..100 { txn.put(db, &[i], &[i], WriteFlags::empty()).unwrap(); } txn.commit().unwrap(); - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -746,7 +746,7 @@ fn test_iter_benchmark_pattern_impl( let n = 100u32; - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); for i in 0..n { let key = format!("key{i}"); @@ -756,7 +756,7 @@ fn test_iter_benchmark_pattern_impl( txn.commit().unwrap(); // Setup like benchmark: transaction and db outside the "iteration" - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); // Run the benchmark closure multiple times 
to match criterion behavior @@ -779,9 +779,7 @@ fn test_iter_benchmark_pattern_impl( } // Loop 3: internal iterate function (doesn't affect count) - fn iterate( - cursor: &mut signet_libmdbx::Cursor, - ) -> ReadResult<()> { + fn iterate(cursor: &mut Cursor) -> ReadResult<()> { for result in cursor.iter::() { let (key_len, data_len) = result?; black_box(*key_len + *data_len); @@ -805,190 +803,6 @@ fn test_iter_benchmark_pattern_v2() { test_iter_benchmark_pattern_impl(V2Factory::begin_rw, V2Factory::begin_ro); } -// ============================================================================= -// Timeout Tests (V2 only with read-tx-timeouts feature) -// ============================================================================= - -#[cfg(feature = "read-tx-timeouts")] -mod timeout_tests { - use signet_libmdbx::*; - use std::time::Duration; - use tempfile::tempdir; - - const SHORT_TIMEOUT: Duration = Duration::from_millis(100); - const WAIT_FOR_TIMEOUT: Duration = Duration::from_millis(200); - - /// Test that cursor operations fail after RO transaction times out. 
- #[test] - fn test_cursor_operations_fail_after_timeout() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - - // Write some data - { - let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); - txn.put(db, b"key2", b"val2", WriteFlags::empty()).unwrap(); - txn.commit().unwrap(); - } - - // Create a v2 transaction with short timeout - let mut txn = env.begin_ro_single_thread_with_timeout(SHORT_TIMEOUT).unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(db).unwrap(); - - // Cursor should work before timeout - assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); - - // Wait for timeout - std::thread::sleep(WAIT_FOR_TIMEOUT); - - // Cursor operations should now fail with ReadTransactionTimeout - let err = cursor.first::<(), ()>().unwrap_err(); - assert!( - matches!(err, ReadError::Mdbx(MdbxError::ReadTransactionTimeout)), - "Expected ReadTransactionTimeout, got: {err:?}" - ); - } - - /// Test cursor cleanup after timeout (no memory leak). 
- #[test] - fn test_cursor_cleanup_after_timeout() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - - // Write some data - { - let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); - txn.commit().unwrap(); - } - - // Create transaction with short timeout - let mut txn = env.begin_ro_single_thread_with_timeout(SHORT_TIMEOUT).unwrap(); - let db = txn.open_db(None).unwrap(); - let cursor = txn.cursor(db).unwrap(); - - // Wait for timeout - std::thread::sleep(WAIT_FOR_TIMEOUT); - - // Dropping cursor after timeout should not panic - drop(cursor); - drop(txn); - - // Environment should still be usable - let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(db).unwrap(); - assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); - } - - /// Test multiple cursors cleanup after timeout. - #[test] - fn test_multiple_cursors_cleanup_after_timeout() { - let dir = tempdir().unwrap(); - let env = Environment::builder().set_max_dbs(10).open(dir.path()).unwrap(); - - // Create databases and write data - { - let txn = env.begin_rw_txn().unwrap(); - let db1 = txn.create_db(Some("db1"), DatabaseFlags::empty()).unwrap(); - let db2 = txn.create_db(Some("db2"), DatabaseFlags::empty()).unwrap(); - txn.put(db1, b"k1", b"v1", WriteFlags::empty()).unwrap(); - txn.put(db2, b"k2", b"v2", WriteFlags::empty()).unwrap(); - txn.commit().unwrap(); - } - - // Create transaction with short timeout and multiple cursors - let mut txn = env.begin_ro_single_thread_with_timeout(SHORT_TIMEOUT).unwrap(); - let db1 = txn.open_db(Some("db1")).unwrap(); - let db2 = txn.open_db(Some("db2")).unwrap(); - let cursor1 = txn.cursor(db1).unwrap(); - let cursor2 = txn.cursor(db2).unwrap(); - - // Wait for timeout - std::thread::sleep(WAIT_FOR_TIMEOUT); - - // Dropping all cursors after timeout should not panic - 
drop(cursor1); - drop(cursor2); - drop(txn); - - // Verify environment is still usable - let txn = env.begin_ro_txn().unwrap(); - let db = txn.open_db(Some("db1")).unwrap(); - let mut cursor = txn.cursor(db).unwrap(); - assert_eq!(cursor.first().unwrap(), Some((*b"k1", *b"v1"))); - } - - /// Test that transactions without timeout work correctly. - #[test] - fn test_no_timeout_transaction_works() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - - // Write data - { - let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); - txn.commit().unwrap(); - } - - // Create transaction without timeout - let mut txn = env.begin_ro_single_thread_no_timeout().unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(db).unwrap(); - - // Cursor should work before the "normal" timeout period - assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); - - // Wait longer than the short timeout (but not forever) - std::thread::sleep(WAIT_FOR_TIMEOUT); - - // Cursor should still work since there's no timeout - assert_eq!(cursor.first().unwrap(), Some((*b"key1", *b"val1"))); - } - - /// Test iterator behavior after timeout. 
- #[test] - fn test_iter_fails_after_timeout() { - let dir = tempdir().unwrap(); - let env = Environment::builder().open(dir.path()).unwrap(); - - // Write data - { - let txn = env.begin_rw_txn().unwrap(); - let db = txn.open_db(None).unwrap(); - for i in 0u8..10 { - txn.put(db, [i], [i], WriteFlags::empty()).unwrap(); - } - txn.commit().unwrap(); - } - - // Create transaction with short timeout - let mut txn = env.begin_ro_single_thread_with_timeout(SHORT_TIMEOUT).unwrap(); - let db = txn.open_db(None).unwrap(); - let mut cursor = txn.cursor(db).unwrap(); - - // Start iterating before timeout - let mut iter = cursor.iter::<[u8; 1], [u8; 1]>(); - assert!(iter.next().unwrap().is_ok()); - - // Wait for timeout - std::thread::sleep(WAIT_FOR_TIMEOUT); - - // Iterator should return timeout error - let err = iter.next().unwrap().unwrap_err(); - assert!( - matches!(err, ReadError::Mdbx(MdbxError::ReadTransactionTimeout)), - "Expected ReadTransactionTimeout, got: {err:?}" - ); - } -} - // ============================================================================= // Append API Tests // ============================================================================= @@ -1004,7 +818,7 @@ fn test_cursor_append_impl( let env = Environment::builder().open(dir.path()).unwrap(); // Append keys in sorted order: a, b, c - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1016,7 +830,7 @@ fn test_cursor_append_impl( txn.commit().unwrap(); // Verify data was written correctly - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1048,7 +862,7 @@ fn test_tx_append_impl( // Write using transaction-level append { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.append(db, b"key1", b"val1").unwrap(); @@ 
-1059,7 +873,7 @@ fn test_tx_append_impl( } // Verify data was written correctly - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1089,7 +903,7 @@ fn test_append_dup_impl( let env = Environment::builder().open(dir.path()).unwrap(); // Create DUPSORT database and append duplicates - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1102,7 +916,7 @@ fn test_append_dup_impl( txn.commit().unwrap(); // Verify - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1130,7 +944,7 @@ where let env = Environment::builder().open(dir.path()).unwrap(); // Try append_dup on non-DUPSORT database - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); // Non-DUPSORT database let mut cursor = txn.cursor(db).unwrap(); @@ -1160,7 +974,7 @@ mod append_debug_tests { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1177,7 +991,7 @@ mod append_debug_tests { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1194,7 +1008,7 @@ mod append_debug_tests { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = 
txn.open_db(None).unwrap(); // Insert "b" first @@ -1210,7 +1024,7 @@ mod append_debug_tests { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); // Insert duplicate "2" first @@ -1231,7 +1045,7 @@ mod append_debug_tests { let env = Environment::builder().open(dir.path()).unwrap(); // Create REVERSE_KEY database - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::REVERSE_KEY).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1246,7 +1060,7 @@ mod append_debug_tests { txn.commit().unwrap(); // Verify data was written - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_sync().unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1264,7 +1078,7 @@ mod append_debug_tests { let env = Environment::builder().open(dir.path()).unwrap(); // Create REVERSE_KEY database - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::REVERSE_KEY).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1289,7 +1103,7 @@ mod append_debug_tests { let env = Environment::builder().open(dir.path()).unwrap(); // Create REVERSE_DUP database - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::REVERSE_DUP).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1303,7 +1117,7 @@ mod append_debug_tests { txn.commit().unwrap(); // Verify data was written - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_sync().unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -1321,7 +1135,7 @@ mod append_debug_tests { let env = 
Environment::builder().open(dir.path()).unwrap(); // Create REVERSE_DUP database - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT | DatabaseFlags::REVERSE_DUP).unwrap(); let mut cursor = txn.cursor(db).unwrap(); diff --git a/tests/environment.rs b/tests/environment.rs index 686d9f9..9f61d23 100644 --- a/tests/environment.rs +++ b/tests/environment.rs @@ -25,16 +25,16 @@ fn test_begin_txn() { // writable environment let env = Environment::builder().open(dir.path()).unwrap(); - env.begin_rw_txn().unwrap(); - env.begin_ro_txn().unwrap(); + env.begin_rw_sync().unwrap(); + env.begin_ro_sync().unwrap(); } { // read-only environment let env = Environment::builder().set_flags(Mode::ReadOnly.into()).open(dir.path()).unwrap(); - env.begin_rw_txn().unwrap_err(); - env.begin_ro_txn().unwrap(); + env.begin_rw_sync().unwrap_err(); + env.begin_ro_sync().unwrap(); } } @@ -43,7 +43,7 @@ fn test_open_db() { let dir = tempdir().unwrap(); let env = Environment::builder().set_max_dbs(1).open(dir.path()).unwrap(); - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_sync().unwrap(); txn.open_db(None).unwrap(); txn.open_db(Some("testdb")).unwrap_err(); } @@ -53,7 +53,7 @@ fn test_create_db() { let dir = tempdir().unwrap(); let env = Environment::builder().set_max_dbs(11).open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); txn.open_db(Some("testdb")).unwrap_err(); txn.create_db(Some("testdb"), DatabaseFlags::empty()).unwrap(); txn.open_db(Some("testdb")).unwrap(); @@ -64,7 +64,7 @@ fn test_close_database() { let dir = tempdir().unwrap(); let env = Environment::builder().set_max_dbs(10).open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); txn.create_db(Some("db"), DatabaseFlags::empty()).unwrap(); txn.open_db(Some("db")).unwrap(); } @@ -99,7 +99,7 @@ fn test_stat() { for i in 
0..64 { let mut value = [0u8; 8]; LittleEndian::write_u64(&mut value, i); - let tx = env.begin_rw_txn().expect("begin_rw_txn"); + let tx = env.begin_rw_sync().expect("begin_rw_sync"); let db = tx.open_db(None).unwrap(); tx.put(db, value, value, WriteFlags::default()).expect("tx.put"); tx.commit().expect("tx.commit"); @@ -154,12 +154,12 @@ fn test_freelist() { for i in 0..64 { let mut value = [0u8; 8]; LittleEndian::write_u64(&mut value, i); - let tx = env.begin_rw_txn().expect("begin_rw_txn"); + let tx = env.begin_rw_sync().expect("begin_rw_sync"); let db = tx.open_db(None).unwrap(); tx.put(db, value, value, WriteFlags::default()).expect("tx.put"); tx.commit().expect("tx.commit"); } - let tx = env.begin_rw_txn().expect("begin_rw_txn"); + let tx = env.begin_rw_sync().expect("begin_rw_sync"); let db = tx.open_db(None).unwrap(); tx.clear_db(db).expect("clear"); tx.commit().expect("tx.commit"); diff --git a/tests/proptest_inputs.rs b/tests/proptest_inputs.rs index 3b624e7..c3029d3 100644 --- a/tests/proptest_inputs.rs +++ b/tests/proptest_inputs.rs @@ -35,7 +35,7 @@ proptest! { fn put_get_arbitrary_kv_v1(key in arb_bytes(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Should not panic - may return error for invalid sizes @@ -52,7 +52,7 @@ proptest! { fn del_nonexistent_key_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Delete on nonexistent key should return Ok(false), not panic @@ -66,7 +66,7 @@ proptest! 
{ fn get_arbitrary_key_empty_db_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Get on nonexistent key should return Ok(None), not panic @@ -88,7 +88,7 @@ proptest! { fn put_get_arbitrary_kv_v2(key in arb_bytes(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); // Should not panic - may return error for invalid sizes @@ -105,7 +105,7 @@ proptest! { fn del_nonexistent_key_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); // Delete on nonexistent key should return Ok(false), not panic @@ -119,7 +119,7 @@ proptest! { fn get_arbitrary_key_empty_db_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_ro_unsync().unwrap(); + let txn = env.begin_ro_unsync().unwrap(); let db = txn.open_db(None).unwrap(); // Get on nonexistent key should return Ok(None), not panic @@ -141,7 +141,7 @@ proptest! { fn cursor_set_arbitrary_key_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Add some data so cursor is positioned @@ -159,7 +159,7 @@ proptest! 
{ fn cursor_set_range_arbitrary_key_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Add some data @@ -179,7 +179,7 @@ proptest! { fn cursor_set_key_arbitrary_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"test", b"value", WriteFlags::empty()).unwrap(); @@ -197,7 +197,7 @@ proptest! { fn cursor_set_arbitrary_key_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"test_key", b"test_val", WriteFlags::empty()).unwrap(); @@ -213,7 +213,7 @@ proptest! { fn cursor_set_range_arbitrary_key_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"aaa", b"val_a", WriteFlags::empty()).unwrap(); @@ -231,7 +231,7 @@ proptest! { fn cursor_set_key_arbitrary_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"test", b"value", WriteFlags::empty()).unwrap(); @@ -259,7 +259,7 @@ proptest! 
{ .set_max_dbs(16) .open(dir.path()) .unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); // create_db should not panic, may return error for invalid names let result = txn.create_db(Some(&name), DatabaseFlags::empty()); @@ -275,7 +275,7 @@ proptest! { .set_max_dbs(16) .open(dir.path()) .unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let result = txn.create_db(Some(&name), DatabaseFlags::empty()); let _ = result; @@ -297,7 +297,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); for value in &values { @@ -316,7 +316,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); for value in &values { @@ -333,7 +333,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); // Add some data @@ -356,7 +356,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); @@ -378,7 +378,7 @@ proptest! 
{ ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); @@ -399,7 +399,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); @@ -425,7 +425,7 @@ proptest! { fn iter_from_arbitrary_key_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Add some data @@ -449,7 +449,7 @@ proptest! { fn iter_from_arbitrary_key_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); for i in 0u8..10 { @@ -470,7 +470,7 @@ proptest! { fn iter_dup_of_arbitrary_key_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); // Add some dup data @@ -493,7 +493,7 @@ proptest! 
{ fn iter_dup_from_arbitrary_key_v1(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); for i in 0u8..5 { @@ -518,7 +518,7 @@ proptest! { fn iter_dup_of_arbitrary_key_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); for i in 0u8..5 { @@ -538,7 +538,7 @@ proptest! { fn iter_dup_from_arbitrary_key_v2(key in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); for i in 0u8..5 { @@ -578,7 +578,7 @@ proptest! { fn cursor_put_arbitrary_v1(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -594,7 +594,7 @@ proptest! { fn cursor_put_arbitrary_v2(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let mut cursor = txn.cursor(db).unwrap(); @@ -616,7 +616,7 @@ proptest! 
{ fn empty_key_operations_v1(value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Empty key should be valid @@ -639,7 +639,7 @@ proptest! { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Empty value should be valid @@ -657,7 +657,7 @@ proptest! { fn empty_key_operations_v2(value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, b"", &value, WriteFlags::empty()); @@ -678,7 +678,7 @@ proptest! { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, &key, b"", WriteFlags::empty()); @@ -703,7 +703,7 @@ proptest! { fn roundtrip_correctness_v1(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, &key, &value, WriteFlags::empty()); @@ -726,7 +726,7 @@ proptest! 
{ fn roundtrip_correctness_v2(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, &key, &value, WriteFlags::empty()); @@ -753,7 +753,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); let put1 = txn.put(db, &key, &value1, WriteFlags::empty()); @@ -782,7 +782,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let put1 = txn.put(db, &key, &value1, WriteFlags::empty()); @@ -807,7 +807,7 @@ proptest! { fn delete_correctness_v1(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, &key, &value, WriteFlags::empty()); @@ -833,7 +833,7 @@ proptest! { fn delete_correctness_v2(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, &key, &value, WriteFlags::empty()); @@ -862,7 +862,7 @@ proptest! 
{ ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); // Insert all values @@ -910,7 +910,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); let mut inserted: Vec> = Vec::new(); @@ -953,7 +953,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); // Insert all entries @@ -998,7 +998,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let mut inserted_keys: Vec> = Vec::new(); @@ -1038,7 +1038,7 @@ proptest! { fn cursor_set_correctness_v1(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, &key, &value, WriteFlags::empty()); @@ -1062,7 +1062,7 @@ proptest! { fn cursor_set_correctness_v2(key in arb_safe_key(), value in arb_bytes()) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let put_result = txn.put(db, &key, &value, WriteFlags::empty()); @@ -1089,7 +1089,7 @@ proptest! 
{ ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); let mut inserted: Vec<(Vec, Vec)> = Vec::new(); @@ -1132,7 +1132,7 @@ proptest! { ) { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); let mut inserted: Vec<(Vec, Vec)> = Vec::new(); diff --git a/tests/transaction.rs b/tests/transaction.rs index 2b6fb1c..14e2a7c 100644 --- a/tests/transaction.rs +++ b/tests/transaction.rs @@ -24,14 +24,14 @@ fn test_put_get_del_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key2", b"val2", WriteFlags::empty()).unwrap(); txn.put(db, b"key3", b"val3", WriteFlags::empty()).unwrap(); txn.commit().unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); assert_eq!(txn.get(db.dbi(), b"key1").unwrap(), Some(*b"val1")); assert_eq!(txn.get(db.dbi(), b"key2").unwrap(), Some(*b"val2")); @@ -62,7 +62,7 @@ fn test_put_get_del_multi_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); @@ -75,7 +75,7 @@ fn test_put_get_del_multi_impl( txn.put(db, b"key3", b"val3", WriteFlags::empty()).unwrap(); txn.commit().unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let 
txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); { let mut cur = txn.cursor(db).unwrap(); @@ -85,13 +85,13 @@ fn test_put_get_del_multi_impl( } txn.commit().unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.del(db, b"key1", Some(b"val2")).unwrap(); txn.del(db, b"key2", None).unwrap(); txn.commit().unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); { let mut cur = txn.cursor(db).unwrap(); @@ -125,13 +125,13 @@ fn test_put_get_del_empty_key_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, Default::default()).unwrap(); txn.put(db, b"", b"hello", WriteFlags::empty()).unwrap(); assert_eq!(txn.get(db.dbi(), b"").unwrap(), Some(*b"hello")); txn.commit().unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); assert_eq!(txn.get(db.dbi(), b"").unwrap(), Some(*b"hello")); txn.put(db, b"", b"", WriteFlags::empty()).unwrap(); @@ -159,20 +159,20 @@ fn test_clear_db_impl( let env = Environment::builder().open(dir.path()).unwrap(); { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"key", b"val", WriteFlags::empty()).unwrap(); txn.commit().unwrap(); } { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.clear_db(db).unwrap(); txn.commit().unwrap(); } - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); assert_eq!(txn.get::<()>(db.dbi(), b"key").unwrap(), None); } @@ -199,7 +199,7 @@ fn test_drop_db_impl( let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap(); { - let 
mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(Some("test"), DatabaseFlags::empty()).unwrap(); txn.put(db, b"key", b"val", WriteFlags::empty()).unwrap(); // Workaround for MDBX dbi drop issue @@ -207,7 +207,7 @@ fn test_drop_db_impl( txn.commit().unwrap(); } { - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(Some("test")).unwrap(); unsafe { txn.drop_db(db).unwrap(); @@ -219,7 +219,7 @@ fn test_drop_db_impl( let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap(); - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); txn.open_db(Some("canary")).unwrap(); assert!(matches!(txn.open_db(Some("test")).unwrap_err(), MdbxError::NotFound)); } @@ -244,7 +244,7 @@ fn test_stat_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::empty()).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key2", b"val2", WriteFlags::empty()).unwrap(); @@ -252,26 +252,26 @@ fn test_stat_impl( txn.commit().unwrap(); { - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let stat = txn.db_stat(db.dbi()).unwrap(); assert_eq!(stat.entries(), 3); } - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.del(db, b"key1", None).unwrap(); txn.del(db, b"key2", None).unwrap(); txn.commit().unwrap(); { - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let stat = txn.db_stat(db.dbi()).unwrap(); assert_eq!(stat.entries(), 1); } - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"key4", b"val4", 
WriteFlags::empty()).unwrap(); txn.put(db, b"key5", b"val5", WriteFlags::empty()).unwrap(); @@ -279,7 +279,7 @@ fn test_stat_impl( txn.commit().unwrap(); { - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let stat = txn.db_stat(db.dbi()).unwrap(); assert_eq!(stat.entries(), 4); @@ -306,7 +306,7 @@ fn test_stat_dupsort_impl( let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.create_db(None, DatabaseFlags::DUP_SORT).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key1", b"val2", WriteFlags::empty()).unwrap(); @@ -320,26 +320,26 @@ fn test_stat_dupsort_impl( txn.commit().unwrap(); { - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let stat = txn.db_stat(db.dbi()).unwrap(); assert_eq!(stat.entries(), 9); } - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.del(db, b"key1", Some(b"val2")).unwrap(); txn.del(db, b"key2", None).unwrap(); txn.commit().unwrap(); { - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let stat = txn.db_stat(db.dbi()).unwrap(); assert_eq!(stat.entries(), 5); } - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"key4", b"val1", WriteFlags::empty()).unwrap(); txn.put(db, b"key4", b"val2", WriteFlags::empty()).unwrap(); @@ -347,7 +347,7 @@ fn test_stat_dupsort_impl( txn.commit().unwrap(); { - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(None).unwrap(); let stat = txn.db_stat(db.dbi()).unwrap(); assert_eq!(stat.entries(), 8); @@ -374,12 +374,12 @@ fn test_open_db_cached_returns_same_handle_impl( let 
dir = tempdir().unwrap(); let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); txn.create_db(Some("test"), DatabaseFlags::empty()).unwrap(); txn.commit().unwrap(); // Test that open_db_cached returns the same dbi for the same name - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db1 = txn.open_db(None).unwrap(); let db2 = txn.open_db(None).unwrap(); @@ -413,7 +413,7 @@ fn test_database_flags_getter_impl( let dir = tempdir().unwrap(); let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); // Create a database with DUP_SORT flag let db = txn.create_db(Some("dupsort"), DatabaseFlags::DUP_SORT).unwrap(); @@ -426,7 +426,7 @@ fn test_database_flags_getter_impl( txn.commit().unwrap(); // Verify flags persist after reopening - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); let db = txn.open_db(Some("dupsort")).unwrap(); assert!(db.flags().contains(DatabaseFlags::DUP_SORT)); } @@ -451,12 +451,12 @@ fn test_cached_db_has_correct_flags_impl( let dir = tempdir().unwrap(); let env = Environment::builder().set_max_dbs(2).open(dir.path()).unwrap(); - let mut txn = begin_rw(&env).unwrap(); + let txn = begin_rw(&env).unwrap(); txn.create_db(Some("dupsort"), DatabaseFlags::DUP_SORT).unwrap(); txn.commit().unwrap(); // Test that cached open returns correct flags - let mut txn = begin_ro(&env).unwrap(); + let txn = begin_ro(&env).unwrap(); // First open - cache miss, goes through FFI let db1 = txn.open_db(Some("dupsort")).unwrap(); @@ -490,7 +490,7 @@ fn test_reserve_v1() { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); { unsafe { @@ -501,7 +501,7 @@ fn 
test_reserve_v1() { } txn.commit().unwrap(); - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); assert_eq!(txn.get(db.dbi(), b"key1").unwrap(), Some(*b"val1")); assert_eq!(txn.get::<()>(db.dbi(), b"key").unwrap(), None); @@ -516,7 +516,7 @@ fn test_reserve_v2() { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); { unsafe { @@ -527,7 +527,7 @@ fn test_reserve_v2() { } txn.commit().unwrap(); - let mut txn = env.begin_rw_unsync().unwrap(); + let txn = env.begin_rw_unsync().unwrap(); let db = txn.open_db(None).unwrap(); assert_eq!(txn.get(db.dbi(), b"key1").unwrap(), Some(*b"val1")); assert_eq!(txn.get::<()>(db.dbi(), b"key").unwrap(), None); @@ -542,7 +542,7 @@ fn test_nested_txn() { let dir = tempdir().unwrap(); let env = Environment::builder().open(dir.path()).unwrap(); - let mut txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, b"key1", b"val1", WriteFlags::empty()).unwrap(); @@ -578,21 +578,21 @@ fn test_concurrent_readers_single_writer() { threads.push(thread::spawn(move || { { - let txn = reader_env.begin_ro_txn().unwrap(); + let txn = reader_env.begin_ro_sync().unwrap(); let db = txn.open_db(None).unwrap(); assert_eq!(txn.get::<()>(db.dbi(), key).unwrap(), None); } reader_barrier.wait(); reader_barrier.wait(); { - let txn = reader_env.begin_ro_txn().unwrap(); + let txn = reader_env.begin_ro_sync().unwrap(); let db = txn.open_db(None).unwrap(); txn.get::<[u8; 3]>(db.dbi(), key).unwrap().unwrap() == *val } })); } - let txn = env.begin_rw_txn().unwrap(); + let txn = env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); barrier.wait(); @@ -620,7 +620,7 @@ fn test_concurrent_writers() { let writer_env = env.clone(); 
threads.push(thread::spawn(move || { - let txn = writer_env.begin_rw_txn().unwrap(); + let txn = writer_env.begin_rw_sync().unwrap(); let db = txn.open_db(None).unwrap(); txn.put(db, format!("{key}{i}"), format!("{val}{i}"), WriteFlags::empty()).unwrap(); txn.commit().is_ok() @@ -628,7 +628,7 @@ fn test_concurrent_writers() { } assert!(threads.into_iter().all(|b| b.join().unwrap())); - let txn = env.begin_ro_txn().unwrap(); + let txn = env.begin_ro_sync().unwrap(); let db = txn.open_db(None).unwrap(); for i in 0..n {