diff --git a/examples/arena_compact_tests/src/main.rs b/examples/arena_compact_tests/src/main.rs
index 576eef7..4258353 100644
--- a/examples/arena_compact_tests/src/main.rs
+++ b/examples/arena_compact_tests/src/main.rs
@@ -29,6 +29,8 @@ fn arena_create() -> Result<(), std::io::Error> {
     println!("len {:.2?}", tree.get_data().len());
     // pathmap::alloc_tracking::read().print();
     // pathmap::alloc_tracking::reset();
+    pathmap::timed_span::print_counters();
+    pathmap::timed_span::reset_counters();
 
     let start = Instant::now();
     let mut zipper = tree.read_zipper();
@@ -38,6 +40,8 @@ fn arena_create() -> Result<(), std::io::Error> {
         assert!(zipper.descend_to_existing(path) == path.len());
         // assert_eq!(zipper.path(), path);
     }
+    pathmap::timed_span::print_counters();
+
     println!("checked act in {:.2?}", start.elapsed());
     let start = Instant::now();
     let tree2 = ArenaCompactTree::from_zipper(tree.read_zipper(), |_v| 0);
@@ -78,6 +82,7 @@ fn arena_dump() -> Result<(), std::io::Error> {
         assert!(zipper.descend_to_existing(path) == path.len());
         // assert_eq!(zipper.path(), path);
     }
+    pathmap::timed_span::print_counters();
     println!("checked act in {:.2?}", start.elapsed());
     let start = Instant::now();
     let tree2 = ArenaCompactTree::from_zipper(tree.read_zipper(), |_v| 0);
diff --git a/src/arena_compact.rs b/src/arena_compact.rs
index 5206398..db2917c 100644
--- a/src/arena_compact.rs
+++ b/src/arena_compact.rs
@@ -80,6 +80,7 @@ use std::{io::Write, hash::Hasher};
 use crate::{
     morphisms::Catamorphism,
     utils::{BitMask, ByteMask, find_prefix_overlap},
+    timed_span::timed_span,
     zipper::{
         Zipper, ZipperValues, ZipperForking, ZipperAbsolutePath,
         ZipperIteration, ZipperMoving, ZipperPathBuffer, ZipperReadOnlyValues,
@@ -1664,10 +1665,13 @@ impl<'tree, Storage> ZipperMoving for ACTZipper<'tree, Storage>
 where Storage: AsRef<[u8]>
 {
     /// Returns `true` if the zipper cannot ascend further, otherwise returns `false`
-    fn at_root(&self) -> bool { self.path.len() <= self.origin_depth }
+    fn at_root(&self) -> bool {
+        self.path.len() <= self.origin_depth
+    }
 
     /// Resets the zipper's focus back to the root
     fn reset(&mut self) {
+        timed_span!(Reset);
         // self.ascend(self.path.len() - self.origin_depth);
         self.path.truncate(self.origin_depth);
         self.cur_node = self.tree.get_node(self.stack[0].node_id).0;
@@ -1682,6 +1686,7 @@ where Storage: AsRef<[u8]>
     ///
     /// WARNING: This is not a cheap method. It may have an order-N cost
     fn val_count(&self) -> usize {
+        timed_span!(ValueCount);
         let mut zipper = self.clone();
         zipper.reset();
         let mut count = 0;
@@ -1699,6 +1704,7 @@ where Storage: AsRef<[u8]>
     /// Returns `true` if the zipper points to an existing path within the tree, otherwise `false`. The
     /// zipper's location will be updated, regardless of whether or not the path exists within the tree.
     fn descend_to<P: AsRef<[u8]>>(&mut self, path: P) -> bool {
+        timed_span!(DescendTo);
         let path = path.as_ref();
         let depth = path.len();
         let descended = self.descend_to_existing(path);
@@ -1718,6 +1724,7 @@ where Storage: AsRef<[u8]>
     /// existing path after this method returns, unless the method was called with the focus on a
     /// non-existent path.
     fn descend_to_existing<P: AsRef<[u8]>>(&mut self, path: P) -> usize {
+        timed_span!(DescendToExisting);
         self.descend_cond(path.as_ref(), false)
     }
@@ -1729,12 +1736,14 @@ where Storage: AsRef<[u8]>
     /// If the focus is already on a value, this method will descend to the *next* value along
     /// the path.
     fn descend_to_value<K: AsRef<[u8]>>(&mut self, path: K) -> usize {
+        timed_span!(DescendToValue);
         self.descend_cond(path.as_ref(), true)
     }
 
     /// Moves the zipper one byte deeper into the trie. Identical in effect to [descend_to](Self::descend_to)
     /// with a 1-byte key argument
     fn descend_to_byte(&mut self, k: u8) -> bool {
+        timed_span!(DescendToByte);
         self.descend_to(&[k])
     }
@@ -1746,6 +1755,7 @@ where Storage: AsRef<[u8]>
     /// to the trie. This method should only be used as part of a directed traversal operation, but
     /// index-based paths may not be stored as locations within the trie.
     fn descend_indexed_byte(&mut self, idx: usize) -> bool {
+        timed_span!(DescendIndexedBranch);
         if self.invalid > 0 {
             return false;
         }
@@ -1800,12 +1810,14 @@ where Storage: AsRef<[u8]>
     /// NOTE: This method should have identical behavior to passing `0` to [descend_indexed_byte](ZipperMoving::descend_indexed_byte),
     /// although with less overhead
     fn descend_first_byte(&mut self) -> bool {
+        timed_span!(DescendFirstByte);
         self.descend_indexed_byte(0)
     }
 
     /// Descends the zipper's focus until a branch or a value is encountered. Returns `true` if the focus
     /// moved otherwise returns `false`
     fn descend_until(&mut self) -> bool {
+        timed_span!(DescendUntil);
         self.trace_pos();
         let mut descended = false;
         'descend: while self.child_count() == 1 {
@@ -1856,6 +1868,7 @@ where Storage: AsRef<[u8]>
     /// If the root is fewer than `n` steps from the zipper's position, then this method will stop at
     /// the root and return `false`
     fn ascend(&mut self, mut steps: usize) -> bool {
+        timed_span!(Ascend);
         self.trace_pos();
         if !self.ascend_invalid(Some(steps)) {
             return false;
         }
@@ -1882,12 +1895,14 @@ where Storage: AsRef<[u8]>
 
     /// Ascends the zipper up a single byte. Equivalent to passing `1` to [ascend](Self::ascend)
     fn ascend_byte(&mut self) -> bool {
+        timed_span!(AscendByte);
         self.ascend(1)
     }
 
     /// Ascends the zipper to the nearest upstream branch point or value. Returns `true` if the zipper
     /// focus moved upwards, otherwise returns `false` if the zipper was already at the root
     fn ascend_until(&mut self) -> bool {
+        timed_span!(AscendUntil);
         self.ascend_to_branch(true)
     }
@@ -1895,16 +1910,19 @@ where Storage: AsRef<[u8]>
     /// `true` if the zipper focus moved upwards, otherwise returns `false` if the zipper was already at the
     /// root
     fn ascend_until_branch(&mut self) -> bool {
+        timed_span!(AscendUntilBranch);
         self.ascend_to_branch(false)
     }
 
     #[inline]
     fn to_next_sibling_byte(&mut self) -> bool {
+        timed_span!(ToNextSiblingByte);
         self.to_sibling(true)
     }
 
     #[inline]
     fn to_prev_sibling_byte(&mut self) -> bool {
+        timed_span!(ToPrevSiblingByte);
         self.to_sibling(false)
     }
@@ -1920,6 +1938,7 @@ where Storage: AsRef<[u8]>
     ///
     /// Returns a reference to the value or `None` if the zipper has encountered the root.
     fn to_next_val(&mut self) -> bool {
+        timed_span!(ToNextVal);
         while self.to_next_step() {
             if self.is_val() {
                 return true;
             }
@@ -1939,6 +1958,7 @@ where Storage: AsRef<[u8]>
     ///
     /// See: [to_next_k_path](ZipperIteration::to_next_k_path)
     fn descend_first_k_path(&mut self, k: usize) -> bool {
+        timed_span!(DescendFirstKPath);
         for ii in 0..k {
             if !self.descend_first_byte() {
                 self.ascend(ii);
@@ -1960,6 +1980,7 @@ where Storage: AsRef<[u8]>
     ///
     /// See: [descend_first_k_path](ZipperIteration::descend_first_k_path)
     fn to_next_k_path(&mut self, k: usize) -> bool {
+        timed_span!(ToNextKPath);
         let mut depth = k;
         'outer: loop {
             while depth > 0 && self.child_count() <= 1 {
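
// --- Illustrative note (not part of the patch) -------------------------------------
// Several of the instrumented ACTZipper methods above delegate to other instrumented
// methods (descend_to_byte calls descend_to, descend_first_byte calls
// descend_indexed_byte), so their spans nest and the per-entry cycle totals overlap
// rather than partition the run time. A minimal sketch of that overlap using only the
// public timed_span items; the function name is hypothetical.
fn nested_spans_overlap() {
    use core::sync::atomic::Ordering;
    use pathmap::timed_span::{Entries, TimedSpanGuard, COUNTERS};

    // Outer span: DescendToByte; inner span: DescendTo, mirroring the delegation above.
    let outer = &COUNTERS[Entries::DescendToByte as usize];
    outer.count.fetch_add(1, Ordering::Relaxed);
    let _outer_guard = TimedSpanGuard::new(&outer.cycles);
    {
        let inner = &COUNTERS[Entries::DescendTo as usize];
        inner.count.fetch_add(1, Ordering::Relaxed);
        let _inner_guard = TimedSpanGuard::new(&inner.cycles);
        // Work done here is added to BOTH cycle totals when the guards drop, so the
        // printed TSCDelta columns overlap rather than summing to the wall time.
    }
}
// ------------------------------------------------------------------------------------
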
diff --git a/src/lib.rs b/src/lib.rs
index b1ce0b7..983ecaa 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -55,6 +55,9 @@ pub mod fuzzer;
 #[cfg(feature = "counters")]
 pub mod counters;
 
+/// Timed-span counters for code instrumentation and optimization
+pub mod timed_span;
+
 /// Shims to allow the use of a custom [`Allocator`](std::alloc::Allocator) type, if running with the `nightly` feature. Does nothing otherwise
 pub mod alloc;
diff --git a/src/path_serialization.rs b/src/path_serialization.rs
index 7e88ca8..3e5e9fb 100644
--- a/src/path_serialization.rs
+++ b/src/path_serialization.rs
@@ -110,7 +110,7 @@ pub fn deserialize_paths_
     deserialize_paths(wz, source, |_, _| v.clone())
 }
 
-pub fn deserialize_paths, R: std::io::Read, F: Fn(usize, &[u8]) -> V>(mut wz: WZ, mut source: R, fv: F) -> std::io::Result {
+pub fn deserialize_paths, R: std::io::Read, F: Fn(usize, &[u8]) -> V>(mut wz: WZ, source: R, fv: F) -> std::io::Result {
     let mut submap = PathMap::new_in(wz.alloc());
     let r = for_each_deserialized_path(source, |k, p| {
         let v = fv(k, p);
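
// --- Illustrative sketch (not part of the patch) ------------------------------------
// How the counters introduced by src/timed_span.rs (the next file in this diff) are
// meant to be read around a measured region, mirroring the arena_compact_tests changes
// above. The function and its arguments are hypothetical; only reset_counters(),
// print_counters(), and the ZipperMoving methods exercised elsewhere in this diff are
// assumed to exist as shown.
fn profile_descents<Z: pathmap::zipper::ZipperMoving>(zipper: &mut Z, paths: &[Vec<u8>]) {
    pathmap::timed_span::reset_counters();   // discard counts accumulated during setup
    for path in paths {
        zipper.reset();
        let matched = zipper.descend_to_existing(path);
        debug_assert_eq!(matched, path.len());
    }
    // Emits one CSV-style row per non-zero entry: Name,Count,TSCDelta,TSCAverage
    pathmap::timed_span::print_counters();
}
// ------------------------------------------------------------------------------------
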
diff --git a/src/timed_span.rs b/src/timed_span.rs
new file mode 100644
index 0000000..9f206e5
--- /dev/null
+++ b/src/timed_span.rs
@@ -0,0 +1,300 @@
+use std::sync::atomic::{AtomicU64, Ordering};
+
+macro_rules! entries {
+    (
+        pub enum $name:ident {
+            $($entry:ident $(= $value:expr)?),*
+            $(,)?
+        }
+    ) => {
+        #[derive(Clone, Copy)]
+        pub enum $name {
+            $($entry $(= $value)?),*
+        }
+        impl $name {
+            const ALL: &[$name] = &[$($name::$entry),*];
+            const COUNT: usize = $name::ALL.len();
+            pub fn to_str(&self) -> &'static str {
+                match self {
+                    $(
+                        $name::$entry => stringify!($entry)
+                    ),*
+                }
+            }
+        }
+
+    }
+}
+
+entries!{
+    pub enum Entries {
+        Reset,
+        ValueCount,
+        DescendTo,
+        DescendToExisting,
+        DescendToValue,
+        DescendToByte,
+        DescendIndexedBranch,
+        DescendFirstByte,
+        DescendUntil,
+        MoveToPath,
+        AscendByte,
+        Ascend,
+        ToNextSiblingByte,
+        ToPrevSiblingByte,
+        ToNextStep,
+        AscendUntil,
+        AscendUntilBranch,
+        ToNextVal,
+        DescendFirstKPath,
+        ToNextKPath,
+        ToNextGetValue,
+        ForkReadZipper,
+    }
+}
+
+pub struct Counter {
+    pub count: AtomicU64,
+    pub cycles: AtomicU64,
+}
+
+impl Counter {
+    const fn new() -> Self {
+        Self {
+            count: AtomicU64::new(0),
+            cycles: AtomicU64::new(0),
+        }
+    }
+}
+
+pub static COUNTERS: [Counter; Entries::COUNT] =
+    [const { Counter::new() }; Entries::COUNT];
+
+pub fn reset_counters() {
+    for counter in &COUNTERS {
+        counter.count.store(0, Ordering::Relaxed);
+        counter.cycles.store(0, Ordering::Relaxed);
+    }
+}
+
+pub fn print_counters() {
+    println!("{:>20},Count,TSCDelta,TSCAverage", "Name");
+    for &entry in Entries::ALL {
+        let counter = &COUNTERS[entry as usize];
+        let count = counter.count.load(Ordering::Relaxed);
+        let cycles = counter.cycles.load(Ordering::Relaxed);
+        if count == 0 && cycles == 0 { continue; }
+        let average = cycles as f64 / count as f64;
+        println!("{:>20},{},{},{}", entry.to_str(), count, cycles, average);
+    }
+}
+
+#[allow(dead_code)]
+mod tsc {
+    use std::arch::asm;
+    use std::sync::atomic::{AtomicU64, Ordering};
+    #[cfg(target_arch = "aarch64")]
+    #[inline]
+    fn read_cycle_counter() -> u64 {
+        let val: u64;
+        unsafe {
+            asm!(
+                "mrs {}, CNTVCT_EL0",
+                out(reg) val,
+                options(nostack, nomem)
+            );
+        }
+        val
+    }
+
+    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    #[inline]
+    fn read_cycle_counter() -> u64 {
+        let lo: u32;
+        let hi: u32;
+        unsafe {
+            asm!(
+                "rdtsc",
+                out("eax") lo,
+                out("edx") hi,
+                options(nostack, nomem)
+            );
+        }
+        ((hi as u64) << 32) | (lo as u64)
+    }
+
+    pub struct TimedSpanGuard<'a> {
+        start: u64,
+        counter: &'a AtomicU64,
+    }
+
+    impl<'a> TimedSpanGuard<'a> {
+        pub fn new(counter: &'a AtomicU64) -> Self {
+            Self {
+                start: read_cycle_counter(),
+                counter,
+            }
+        }
+    }
+
+    impl<'a> Drop for TimedSpanGuard<'a> {
+        fn drop(&mut self) {
+            let end = read_cycle_counter();
+            self.counter.fetch_add(end - self.start, Ordering::Relaxed);
+        }
+    }
+}
+
+#[allow(dead_code)]
+mod std_instant {
+    use std::sync::atomic::{AtomicU64, Ordering};
+    use std::time::Instant;
+    pub struct TimedSpanGuard<'a> {
+        start: Instant,
+        counter: &'a AtomicU64,
+    }
+
+    impl<'a> TimedSpanGuard<'a> {
+        pub fn new(counter: &'a AtomicU64) -> Self {
+            Self {
+                start: Instant::now(),
+                counter,
+            }
+        }
+    }
+
+    impl<'a> Drop for TimedSpanGuard<'a> {
+        fn drop(&mut self) {
+            let duration = self.start.elapsed().as_nanos() as u64;
+            self.counter.fetch_add(duration, Ordering::Relaxed);
+        }
+    }
+}
+
+#[allow(dead_code)]
+mod clock_monotonic {
+    use std::sync::atomic::{AtomicU64, Ordering};
+
+    #[cfg(target_os="linux")]
+    mod linux {
+        // linux/time.h
+        // const CLOCK_REALTIME          : i32 = 0;
+        // const CLOCK_MONOTONIC         : i32 = 1;
+        // const CLOCK_PROCESS_CPUTIME_ID: i32 = 2;
+        // const CLOCK_THREAD_CPUTIME_ID : i32 = 3;
+        const CLOCK_MONOTONIC_RAW        : i32 = 4;
+        // const CLOCK_REALTIME_COARSE   : i32 = 5;
+        const CLOCK_MONOTONIC_COARSE     : i32 = 6;
+        // const CLOCK_BOOTTIME          : i32 = 7;
+        // const CLOCK_REALTIME_ALARM    : i32 = 8;
+        // const CLOCK_BOOTTIME_ALARM    : i32 = 9;
+
+        #[allow(non_camel_case_types)]
+        #[repr(C)]
+        struct timespec {
+            tv_sec: u64,
+            tv_nsec: i32,
+        }
+
+        extern "C" {
+            fn clock_gettime(clockid: i32, tp: &mut timespec) -> i32;
+        }
+        pub fn get_time() -> u64 {
+            let mut time = timespec { tv_sec: 0, tv_nsec: 0 };
+            let rv = unsafe { clock_gettime(CLOCK_MONOTONIC_RAW, &mut time) };
+            debug_assert!(rv == 0, "failed clock_gettime?");
+            time.tv_sec * 1_000_000_000 + time.tv_nsec as u64
+        }
+    }
+    #[cfg(target_os="linux")]
+    use linux::get_time;
+
+    #[cfg(all(target_os="macos", target_arch="aarch64"))]
+    mod macos {
+        const CLOCK_MONOTONIC_RAW       : i32 = 4;
+        const CLOCK_MONOTONIC_RAW_APPROX: i32 = 5;
+        const CLOCK_MONOTONIC           : i32 = 6;
+        // const CALENDAR_CLOCK: u64 = 1;
+        #[allow(non_camel_case_types)]
+        #[repr(C)]
+        struct timespec {
+            tv_sec: u64,
+            tv_nsec: i32,
+        }
+        extern "C" {
+            // host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock);
+            // fn clock_get_time(clock_serv: &mut u64, cur_time: &mut mach_timespec) -> i64;
+            // mach_port_deallocate(mach_task_self(), cclock);
+            fn clock_gettime(clock_serv: i32, cur_time: &mut timespec) -> i64;
+        }
+
+        pub fn get_time() -> u64 {
+            let mut time = timespec { tv_sec: 0, tv_nsec: 0 };
+            let rv = unsafe { clock_gettime(CLOCK_MONOTONIC_RAW, &mut time) };
+            debug_assert!(rv == 0, "failed clock_gettime?");
+            (time.tv_sec as u64 * 1_000_000_000) + time.tv_nsec as u64
+        }
+    }
+
+    #[cfg(all(target_os="macos", target_arch="aarch64"))]
+    use macos::get_time;
+
+    pub struct TimedSpanGuard<'a> {
+        start: u64,
+        counter: &'a AtomicU64,
+    }
+
+    impl<'a> TimedSpanGuard<'a> {
+        pub fn new(counter: &'a AtomicU64) -> Self {
+            Self {
+                start: get_time(),
+                counter,
+            }
+        }
+    }
+
+    impl<'a> Drop for TimedSpanGuard<'a> {
+        fn drop(&mut self) {
+            let end = get_time();
+            self.counter.fetch_add(end - self.start, Ordering::Relaxed);
+        }
+    }
+}
+
+pub use tsc::TimedSpanGuard;
+pub const ENABLED: bool = true;
+
+macro_rules! timed_span {
+    ($name:ident, $dst:path) => {
+        let _guard = if $crate::timed_span::ENABLED {
+            let index = $crate::timed_span::Entries::$name as usize;
+            let counter = &$dst[index];
+            counter.count.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
+            Some($crate::timed_span::TimedSpanGuard::new(&counter.cycles))
+        } else {
+            None
+        };
+    };
+    ($name:ident) => {
+        $crate::timed_span::timed_span!($name, $crate::timed_span::COUNTERS)
+    };
+}
+
+pub(crate) use timed_span;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_timed_span() {
+        reset_counters();
+        {
+            timed_span!(Reset);
+            for ii in 0..100_000 {
+                core::hint::black_box(ii);
+            }
+        }
+        print_counters();
+    }
+}
diff --git a/src/zipper.rs b/src/zipper.rs
index f6d8aa6..8909de9 100644
--- a/src/zipper.rs
+++ b/src/zipper.rs
@@ -32,6 +32,7 @@ use crate::alloc::{Allocator, GlobalAlloc};
 use crate::utils::{ByteMask, find_prefix_overlap};
 use crate::trie_node::*;
 use crate::PathMap;
+use crate::timed_span::timed_span;
 
 pub use crate::write_zipper::*;
 pub use crate::trie_ref::*;
@@ -135,6 +136,7 @@ pub trait ZipperMoving: Zipper {
     /// Resets the zipper's focus back to its root
     fn reset(&mut self) {
+        timed_span!(Reset);
         while !self.at_root() {
             self.ascend_byte();
         }
     }
@@ -153,6 +155,7 @@ pub trait ZipperMoving: Zipper {
     ///
     /// Returns the number of bytes shared between the old and new location and whether the new location exists in the trie
     fn move_to_path<K: AsRef<[u8]>>(&mut self, path: K) -> (usize, bool) {
+        timed_span!(MoveToPath);
         let path = path.as_ref();
         let p = self.path();
         let overlap = find_prefix_overlap(path, p);
@@ -179,6 +182,7 @@ pub trait ZipperMoving: Zipper {
     /// existing path after this method returns, unless the method was called with the focus on a
     /// non-existent path.
     fn descend_to_existing<K: AsRef<[u8]>>(&mut self, k: K) -> usize {
+        timed_span!(DescendToExisting);
         let k = k.as_ref();
         let mut i = 0;
         while i < k.len() {
@@ -200,6 +204,7 @@ pub trait ZipperMoving: Zipper {
     /// the path.
     //GOAT. this default implementation could certainly be optimized
     fn descend_to_val<K: AsRef<[u8]>>(&mut self, k: K) -> usize {
+        timed_span!(DescendToValue);
         let k = k.as_ref();
         let mut i = 0;
         while i < k.len() {
@@ -224,6 +229,7 @@ pub trait ZipperMoving: Zipper {
     /// Moves the zipper one byte deeper into the trie. Identical in effect to [descend_to](Self::descend_to)
     /// with a 1-byte key argument
     fn descend_to_byte(&mut self, k: u8) -> bool {
+        timed_span!(DescendToByte);
         self.descend_to(&[k])
     }
@@ -235,6 +241,7 @@ pub trait ZipperMoving: Zipper {
     /// to the trie. This method should only be used as part of a directed traversal operation, but
     /// index-based paths may not be stored as locations within the trie.
     fn descend_indexed_byte(&mut self, idx: usize) -> bool {
+        timed_span!(DescendIndexedBranch);
         let mask = self.child_mask();
         let child_byte = match mask.indexed_bit::<true>(idx) {
             Some(byte) => byte,
@@ -258,6 +265,7 @@ pub trait ZipperMoving: Zipper {
     /// NOTE: This method should have identical behavior to passing `0` to [descend_indexed_byte](ZipperMoving::descend_indexed_byte),
     /// although with less overhead
     fn descend_first_byte(&mut self) -> bool {
+        timed_span!(DescendFirstByte);
         self.descend_indexed_byte(0)
     }
@@ -267,6 +275,7 @@ pub trait ZipperMoving: Zipper {
     /// If there is a value at the focus, the zipper will descend to the next value or branch, however the
     /// zipper will not descend further if this method is called with the focus already on a branch.
     fn descend_until(&mut self) -> bool {
+        timed_span!(DescendUntil);
         let mut descended = false;
         while self.child_count() == 1 {
             descended = true;
@@ -286,6 +295,7 @@ pub trait ZipperMoving: Zipper {
 
     /// Ascends the zipper up a single byte. Equivalent to passing `1` to [ascend](Self::ascend)
     fn ascend_byte(&mut self) -> bool {
+        timed_span!(AscendByte);
         self.ascend(1)
     }
@@ -310,6 +320,7 @@ pub trait ZipperMoving: Zipper {
     /// This method is equivalent to calling [ZipperMoving::ascend] with `1`, followed by [ZipperMoving::descend_indexed_byte]
     /// where the index passed is 1 more than the index of the current focus position.
     fn to_next_sibling_byte(&mut self) -> bool {
+        timed_span!(ToNextSiblingByte);
         let cur_byte = match self.path().last() {
             Some(byte) => *byte,
             None => return false
         };
@@ -340,6 +351,7 @@ pub trait ZipperMoving: Zipper {
     /// This method is equivalent to calling [Self::ascend] with `1`, followed by [Self::descend_indexed_byte]
     /// where the index passed is 1 less than the index of the current focus position.
     fn to_prev_sibling_byte(&mut self) -> bool {
+        timed_span!(ToPrevSiblingByte);
         let cur_byte = match self.path().last() {
             Some(byte) => *byte,
             None => return false
         };
@@ -367,6 +379,7 @@ pub trait ZipperMoving: Zipper {
     /// Returns `true` if the position of the zipper has moved, or `false` if the zipper has returned
     /// to the root
     fn to_next_step(&mut self) -> bool {
+        timed_span!(ToNextStep);
 
         //If we're at a leaf ascend until we're not and jump to the next sibling
         if self.child_count() == 0 {
@@ -1420,6 +1433,7 @@ pub(crate) mod read_zipper_core {
     impl<'trie, V: Clone + Send + Sync + Unpin + 'trie, A: Allocator + 'trie> ZipperForking<V> for ReadZipperCore<'trie, '_, V, A> {
         type ReadZipperT<'a> = ReadZipperCore<'a, 'a, V, A> where Self: 'a;
         fn fork_read_zipper<'a>(&'a self) -> Self::ReadZipperT<'a> {
+            timed_span!(ForkReadZipper);
             let new_root_val = self.get_val();
             let new_root_path = self.origin_path();
             let new_root_key_start = new_root_path.len() - self.node_key().len();
@@ -1449,6 +1463,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn reset(&mut self) {
+            timed_span!(Reset);
             self.ancestors.truncate(1);
             match self.ancestors.pop() {
                 Some((node, _tok, _prefix_len)) => {
@@ -1470,6 +1485,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn val_count(&self) -> usize {
+            timed_span!(ValueCount);
             if self.node_key().len() == 0 {
                 val_count_below_root(self.focus_node) + (self.is_val() as usize)
             } else {
@@ -1482,6 +1498,7 @@ pub(crate) mod read_zipper_core {
             }
         }
         fn descend_to<K: AsRef<[u8]>>(&mut self, k: K) -> bool {
+            timed_span!(DescendTo);
             let k = k.as_ref();
             if k.len() == 0 {
                 return self.path_exists() //Zero-length path is a no-op
@@ -1499,6 +1516,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn descend_to_byte(&mut self, k: u8) -> bool {
+            timed_span!(DescendToByte);
             self.prepare_buffers();
             debug_assert!(self.is_regularized());
 
@@ -1514,6 +1532,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn descend_indexed_byte(&mut self, child_idx: usize) -> bool {
+            timed_span!(DescendIndexedBranch);
             self.prepare_buffers();
             debug_assert!(self.is_regularized());
 
@@ -1535,6 +1554,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn descend_first_byte(&mut self) -> bool {
+            timed_span!(DescendFirstByte);
             self.prepare_buffers();
             debug_assert!(self.is_regularized());
             let cur_tok = self.focus_node.iter_token_for_path(self.node_key());
@@ -1571,6 +1591,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn descend_until(&mut self) -> bool {
+            timed_span!(DescendUntil);
             debug_assert!(self.is_regularized());
             let mut moved = false;
             while self.child_count() == 1 {
@@ -1584,6 +1605,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn descend_to_existing<K: AsRef<[u8]>>(&mut self, k: K) -> usize {
+            timed_span!(DescendToExisting);
             let mut k = k.as_ref();
             if k.len() == 0 {
                 return 0 //Zero-length path is a no-op
@@ -1649,6 +1671,7 @@ pub(crate) mod read_zipper_core {
         // }
 
         fn to_next_sibling_byte(&mut self) -> bool {
+            timed_span!(ToNextSiblingByte);
             self.prepare_buffers();
             if self.prefix_buf.len() == 0 {
                 return false
@@ -1709,10 +1732,12 @@ pub(crate) mod read_zipper_core {
         }
 
         fn to_prev_sibling_byte(&mut self) -> bool {
+            timed_span!(ToPrevSiblingByte);
             self.to_sibling(false)
         }
 
         fn ascend(&mut self, mut steps: usize) -> bool {
+            timed_span!(Ascend);
             debug_assert!(self.is_regularized());
             while steps > 0 {
                 if self.excess_key_len() == 0 {
@@ -1736,6 +1761,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn ascend_byte(&mut self) -> bool {
+            timed_span!(AscendByte);
             debug_assert!(self.is_regularized());
             if self.excess_key_len() == 0 {
                 match self.ancestors.pop() {
@@ -1755,6 +1781,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn ascend_until(&mut self) -> bool {
+            timed_span!(AscendUntil);
             debug_assert!(self.is_regularized());
             if self.at_root() {
                 return false;
             }
@@ -1771,6 +1798,7 @@ pub(crate) mod read_zipper_core {
         }
 
         fn ascend_until_branch(&mut self) -> bool {
+            timed_span!(AscendUntilBranch);
             debug_assert!(self.is_regularized());
             if self.at_root() {
                 return false;
             }
@@ -1936,9 +1964,11 @@ pub(crate) mod read_zipper_core {
 
     impl<'trie, V: Clone + Send + Sync + Unpin + 'trie, A: Allocator + 'trie> ZipperIteration for ReadZipperCore<'trie, '_, V, A> {
         fn to_next_val(&mut self) -> bool {
+            timed_span!(ToNextVal);
             self.to_next_get_val().is_some()
         }
         fn descend_first_k_path(&mut self, k: usize) -> bool {
+            timed_span!(DescendFirstKPath);
             self.prepare_buffers();
             debug_assert!(self.is_regularized());
 
@@ -1948,6 +1978,7 @@ pub(crate) mod read_zipper_core {
             self.k_path_internal(k, self.prefix_buf.len())
         }
         fn to_next_k_path(&mut self, k: usize) -> bool {
+            timed_span!(ToNextKPath);
             let base_idx = if self.path_len() >= k {
                 self.prefix_buf.len() - k
             } else {
@@ -1962,6 +1993,7 @@ pub(crate) mod read_zipper_core {
 
     impl<'a, V: Clone + Send + Sync + Unpin + 'a, A: Allocator + 'a> ZipperReadOnlyIteration<'a, V> for ReadZipperCore<'a, '_, V, A> {
         fn to_next_get_val(&mut self) -> Option<&'a V> {
+            timed_span!(ToNextGetValue);
             self.prepare_buffers();
             loop {
                 if self.focus_iter_token == NODE_ITER_INVALID {
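
// --- Illustrative sketch (not part of the patch) ------------------------------------
// A crate-internal calibration test one could add next to src/timed_span.rs to relate
// the TSCDelta/TSCAverage columns printed by print_counters() to wall-clock time. It
// borrows the Reset slot as a scratch counter; the 50ms sleep duration is arbitrary,
// and the measured ratio depends on the timing backend selected by
// `pub use tsc::TimedSpanGuard` (CNTVCT_EL0 ticks on aarch64, TSC ticks on x86).
#[cfg(test)]
mod tick_calibration {
    use crate::timed_span::{timed_span, reset_counters, Entries, COUNTERS};
    use std::sync::atomic::Ordering;

    #[test]
    fn ticks_per_millisecond() {
        reset_counters();
        {
            timed_span!(Reset);   // the guard covers this block, including the sleep
            std::thread::sleep(std::time::Duration::from_millis(50));
        }
        let ticks = COUNTERS[Entries::Reset as usize].cycles.load(Ordering::Relaxed);
        println!("~{} ticks per millisecond", ticks / 50);
    }
}
// ------------------------------------------------------------------------------------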