linera_views/backends/rocks_db.rs

// Copyright (c) Zefchain Labs, Inc.
// SPDX-License-Identifier: Apache-2.0

//! Implements [`crate::store::KeyValueStore`] for the RocksDB database.

use std::{
    ffi::OsString,
    fmt::Display,
    path::PathBuf,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
};

use linera_base::ensure;
use rocksdb::{BlockBasedOptions, Cache, DBCompactionStyle};
use serde::{Deserialize, Serialize};
use sysinfo::{CpuRefreshKind, MemoryRefreshKind, RefreshKind, System};
use tempfile::TempDir;
use thiserror::Error;

#[cfg(with_metrics)]
use crate::metering::MeteredStore;
#[cfg(with_testing)]
use crate::store::TestKeyValueStore;
use crate::{
    batch::{Batch, WriteOperation},
    common::get_upper_bound_option,
    lru_caching::{LruCachingConfig, LruCachingStore},
    store::{
        AdminKeyValueStore, KeyValueStoreError, ReadableKeyValueStore, WithError,
        WritableKeyValueStore,
    },
    value_splitting::{ValueSplittingError, ValueSplittingStore},
};

/// The key prefixes used in the system.
static ROOT_KEY_DOMAIN: [u8; 1] = [0];
static STORED_ROOT_KEYS_PREFIX: u8 = 1;
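
// Key layout: ordinary data lives under `start_key ++ key`, where `start_key` is
// `ROOT_KEY_DOMAIN ++ root_key`. The first batch written through a store handle also
// records a marker under `STORED_ROOT_KEYS_PREFIX ++ root_key`, which is what
// `list_root_keys` later scans.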

/// The number of stream queries used for tests.
#[cfg(with_testing)]
const TEST_ROCKS_DB_MAX_STREAM_QUERIES: usize = 10;

// The maximum size of values in RocksDB is 3 GiB.
// For offset reasons we decrease it by 400 bytes.
const MAX_VALUE_SIZE: usize = 3 * 1024 * 1024 * 1024 - 400;

// The maximum size of keys in RocksDB is 8 MiB.
// For offset reasons we decrease it by 400 bytes.
const MAX_KEY_SIZE: usize = 8 * 1024 * 1024 - 400;

const WRITE_BUFFER_SIZE: usize = 256 * 1024 * 1024; // 256 MiB
const MAX_WRITE_BUFFER_NUMBER: i32 = 6;
const HYPER_CLOCK_CACHE_BLOCK_SIZE: usize = 8 * 1024; // 8 KiB

/// The RocksDB client that we use.
type DB = rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>;

/// The choice of spawning mode.
/// `SpawnBlocking` always works and is the safest choice.
/// `BlockInPlace` can only be used in a multi-threaded runtime.
/// One way to choose is to select `BlockInPlace` when
/// `tokio::runtime::Handle::current().metrics().num_workers() > 1`.
/// `BlockInPlace` is documented in <https://docs.rs/tokio/latest/tokio/task/fn.block_in_place.html>.
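///
/// # Example
///
/// A minimal sketch of picking the mode from within a running Tokio runtime
/// (the `linera_views::rocks_db` import path is an assumption and may differ):
///
/// ```ignore
/// use linera_views::rocks_db::RocksDbSpawnMode;
///
/// let mode = RocksDbSpawnMode::get_spawn_mode_from_runtime();
/// println!("selected spawn mode: {mode}");
/// ```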
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum RocksDbSpawnMode {
    /// This uses the `spawn_blocking` function of Tokio.
    SpawnBlocking,
    /// This uses the `block_in_place` function of Tokio.
    BlockInPlace,
}

impl RocksDbSpawnMode {
    /// Obtains the spawning mode from the current runtime.
    pub fn get_spawn_mode_from_runtime() -> Self {
        if tokio::runtime::Handle::current().metrics().num_workers() > 1 {
            RocksDbSpawnMode::BlockInPlace
        } else {
            RocksDbSpawnMode::SpawnBlocking
        }
    }

    /// Runs the given function according to the selected spawning policy.
    #[inline]
    async fn spawn<F, I, O>(&self, f: F, input: I) -> Result<O, RocksDbStoreInternalError>
    where
        F: FnOnce(I) -> Result<O, RocksDbStoreInternalError> + Send + 'static,
        I: Send + 'static,
        O: Send + 'static,
    {
        Ok(match self {
            RocksDbSpawnMode::BlockInPlace => tokio::task::block_in_place(move || f(input))?,
            RocksDbSpawnMode::SpawnBlocking => {
                tokio::task::spawn_blocking(move || f(input)).await??
            }
        })
    }
}

impl Display for RocksDbSpawnMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self {
            RocksDbSpawnMode::SpawnBlocking => write!(f, "spawn_blocking"),
            RocksDbSpawnMode::BlockInPlace => write!(f, "block_in_place"),
        }
    }
}

fn check_key_size(key: &[u8]) -> Result<(), RocksDbStoreInternalError> {
    ensure!(
        key.len() <= MAX_KEY_SIZE,
        RocksDbStoreInternalError::KeyTooLong
    );
    Ok(())
}

#[derive(Clone)]
struct RocksDbStoreExecutor {
    db: Arc<DB>,
    start_key: Vec<u8>,
}

impl RocksDbStoreExecutor {
    pub fn contains_keys_internal(
        &self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<bool>, RocksDbStoreInternalError> {
        let size = keys.len();
        let mut results = vec![false; size];
        let mut indices = Vec::new();
        let mut candidate_keys = Vec::new();
        for (i, key) in keys.into_iter().enumerate() {
            check_key_size(&key)?;
            let mut full_key = self.start_key.to_vec();
            full_key.extend(key);
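            // `key_may_exist` may return false positives but never false negatives,
            // so keys that fail this cheap check are reported as absent without a read.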
            if self.db.key_may_exist(&full_key) {
                indices.push(i);
                candidate_keys.push(full_key);
            }
        }
        let candidate_values = self.db.multi_get(candidate_keys);
        for (index, value) in indices.into_iter().zip(candidate_values) {
            results[index] = value?.is_some();
        }
        Ok(results)
    }

    fn read_multi_values_bytes_internal(
        &self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<Option<Vec<u8>>>, RocksDbStoreInternalError> {
        for key in &keys {
            check_key_size(key)?;
        }
        let full_keys = keys
            .into_iter()
            .map(|key| {
                let mut full_key = self.start_key.to_vec();
                full_key.extend(key);
                full_key
            })
            .collect::<Vec<_>>();
        let entries = self.db.multi_get(&full_keys);
        Ok(entries.into_iter().collect::<Result<_, _>>()?)
    }

    fn find_keys_by_prefix_internal(
        &self,
        key_prefix: Vec<u8>,
    ) -> Result<Vec<Vec<u8>>, RocksDbStoreInternalError> {
        check_key_size(&key_prefix)?;
        let mut prefix = self.start_key.clone();
        prefix.extend(key_prefix);
        let len = prefix.len();
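        // Seek to the first key >= `prefix` and scan until a key no longer starts
        // with it; the `start_key ++ key_prefix` portion is stripped from the results.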
        let mut iter = self.db.raw_iterator();
        let mut keys = Vec::new();
        iter.seek(&prefix);
        let mut next_key = iter.key();
        while let Some(key) = next_key {
            if !key.starts_with(&prefix) {
                break;
            }
            keys.push(key[len..].to_vec());
            iter.next();
            next_key = iter.key();
        }
        Ok(keys)
    }

    #[expect(clippy::type_complexity)]
    fn find_key_values_by_prefix_internal(
        &self,
        key_prefix: Vec<u8>,
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, RocksDbStoreInternalError> {
        check_key_size(&key_prefix)?;
        let mut prefix = self.start_key.clone();
        prefix.extend(key_prefix);
        let len = prefix.len();
        let mut iter = self.db.raw_iterator();
        let mut key_values = Vec::new();
        iter.seek(&prefix);
        let mut next_key = iter.key();
        while let Some(key) = next_key {
            if !key.starts_with(&prefix) {
                break;
            }
            if let Some(value) = iter.value() {
                let key_value = (key[len..].to_vec(), value.to_vec());
                key_values.push(key_value);
            }
            iter.next();
            next_key = iter.key();
        }
        Ok(key_values)
    }

    fn write_batch_internal(
        &self,
        batch: Batch,
        write_root_key: bool,
    ) -> Result<(), RocksDbStoreInternalError> {
        let mut inner_batch = rocksdb::WriteBatchWithTransaction::default();
        for operation in batch.operations {
            match operation {
                WriteOperation::Delete { key } => {
                    check_key_size(&key)?;
                    let mut full_key = self.start_key.to_vec();
                    full_key.extend(key);
                    inner_batch.delete(&full_key)
                }
                WriteOperation::Put { key, value } => {
                    check_key_size(&key)?;
                    let mut full_key = self.start_key.to_vec();
                    full_key.extend(key);
                    inner_batch.put(&full_key, value)
                }
                WriteOperation::DeletePrefix { key_prefix } => {
                    check_key_size(&key_prefix)?;
                    let mut full_key1 = self.start_key.to_vec();
                    full_key1.extend(&key_prefix);
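                    // `full_key1` starts with `self.start_key`, whose first byte is a
                    // domain tag (0 or 1) and never 0xFF, so an upper bound always exists.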
                    let full_key2 =
                        get_upper_bound_option(&full_key1).expect("the first entry cannot be 255");
                    inner_batch.delete_range(&full_key1, &full_key2);
                }
            }
        }
        if write_root_key {
            let mut full_key = self.start_key.to_vec();
            full_key[0] = STORED_ROOT_KEYS_PREFIX;
            inner_batch.put(&full_key, vec![]);
        }
        self.db.write(inner_batch)?;
        Ok(())
    }
}

/// The inner client.
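///
/// # Example
///
/// A minimal sketch of an end-to-end write and read. The import paths and the
/// `Batch::new`/`put_key_value_bytes` calls are assumptions based on this crate's
/// `batch` and `store` modules:
///
/// ```ignore
/// use linera_views::{
///     batch::Batch,
///     rocks_db::{RocksDbStoreInternal, RocksDbStoreInternalConfig, RocksDbStoreInternalError},
///     store::{AdminKeyValueStore as _, ReadableKeyValueStore as _, WritableKeyValueStore as _},
/// };
///
/// async fn example(config: &RocksDbStoreInternalConfig) -> Result<(), RocksDbStoreInternalError> {
///     RocksDbStoreInternal::create(config, "my_namespace").await?;
///     let store = RocksDbStoreInternal::connect(config, "my_namespace").await?;
///     let mut batch = Batch::new();
///     batch.put_key_value_bytes(b"key".to_vec(), b"value".to_vec());
///     store.write_batch(batch).await?;
///     assert_eq!(store.read_value_bytes(b"key").await?, Some(b"value".to_vec()));
///     Ok(())
/// }
/// ```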
#[derive(Clone)]
pub struct RocksDbStoreInternal {
    executor: RocksDbStoreExecutor,
    _path_with_guard: PathWithGuard,
    max_stream_queries: usize,
    spawn_mode: RocksDbSpawnMode,
    root_key_written: Arc<AtomicBool>,
}

/// The initial configuration of the system.
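///
/// # Example
///
/// A minimal sketch of building a configuration by hand; the path below is purely
/// illustrative and the `linera_views::rocks_db` import path is an assumption:
///
/// ```ignore
/// use std::path::PathBuf;
///
/// use linera_views::rocks_db::{PathWithGuard, RocksDbSpawnMode, RocksDbStoreInternalConfig};
///
/// let config = RocksDbStoreInternalConfig {
///     path_with_guard: PathWithGuard::new(PathBuf::from("/tmp/linera_rocksdb")),
///     spawn_mode: RocksDbSpawnMode::SpawnBlocking,
///     max_stream_queries: 10,
/// };
/// ```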
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct RocksDbStoreInternalConfig {
    /// The path to the storage containing the namespaces
    pub path_with_guard: PathWithGuard,
    /// The chosen spawn mode
    pub spawn_mode: RocksDbSpawnMode,
    /// Preferred buffer size for async streams.
    pub max_stream_queries: usize,
}

impl RocksDbStoreInternal {
    fn check_namespace(namespace: &str) -> Result<(), RocksDbStoreInternalError> {
        if !namespace
            .chars()
            .all(|character| character.is_ascii_alphanumeric() || character == '_')
        {
            return Err(RocksDbStoreInternalError::InvalidNamespace);
        }
        Ok(())
    }

    fn build(
        config: &RocksDbStoreInternalConfig,
        namespace: &str,
        start_key: Vec<u8>,
    ) -> Result<RocksDbStoreInternal, RocksDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let mut path_buf = config.path_with_guard.path_buf.clone();
        let mut path_with_guard = config.path_with_guard.clone();
        path_buf.push(namespace);
        path_with_guard.path_buf = path_buf.clone();
        let max_stream_queries = config.max_stream_queries;
        let spawn_mode = config.spawn_mode;
        if !std::path::Path::exists(&path_buf) {
            std::fs::create_dir(path_buf.clone())?;
        }
        let sys = System::new_with_specifics(
            RefreshKind::nothing()
                .with_cpu(CpuRefreshKind::everything())
                .with_memory(MemoryRefreshKind::nothing().with_ram()),
        );
        let num_cpus = sys.cpus().len() as i32;
        let total_ram = sys.total_memory() as usize;
        let mut options = rocksdb::Options::default();
        options.create_if_missing(true);
        options.create_missing_column_families(true);
        // Flush the in-memory buffer to disk more often.
        options.set_write_buffer_size(WRITE_BUFFER_SIZE);
        options.set_max_write_buffer_number(MAX_WRITE_BUFFER_NUMBER);
        options.set_compression_type(rocksdb::DBCompressionType::Lz4);
        options.set_level_zero_slowdown_writes_trigger(8);
        options.set_level_zero_stop_writes_trigger(12);
        options.set_level_zero_file_num_compaction_trigger(2);
        // We deliberately give RocksDB one background job per CPU so that a flush and
        // up to (N-1) compactions can run concurrently at full storage bandwidth; these
        // jobs are largely I/O-bound, so foreground application threads still get CPU time.
        options.increase_parallelism(num_cpus);
        options.set_max_background_jobs(num_cpus);
        options.set_max_subcompactions(num_cpus as u32);
        options.set_level_compaction_dynamic_level_bytes(true);

        options.set_compaction_style(DBCompactionStyle::Level);
        options.set_target_file_size_base(2 * WRITE_BUFFER_SIZE as u64);

        let mut block_options = BlockBasedOptions::default();
        block_options.set_pin_l0_filter_and_index_blocks_in_cache(true);
        block_options.set_cache_index_and_filter_blocks(true);
        // Allocate 1/4 of total RAM for the RocksDB block cache, which is a reasonable balance:
        // - Large enough to significantly improve read performance by caching frequently accessed blocks
        // - Small enough to leave memory for other system components
        // - Follows common practice for database caching in server environments
        // - Prevents excessive memory pressure that could lead to swapping or OOM conditions
        block_options.set_block_cache(&Cache::new_hyper_clock_cache(
            total_ram / 4,
            HYPER_CLOCK_CACHE_BLOCK_SIZE,
        ));
        options.set_block_based_table_factory(&block_options);

        let db = DB::open(&options, path_buf)?;
        let executor = RocksDbStoreExecutor {
            db: Arc::new(db),
            start_key,
        };
        Ok(RocksDbStoreInternal {
            executor,
            _path_with_guard: path_with_guard,
            max_stream_queries,
            spawn_mode,
            root_key_written: Arc::new(AtomicBool::new(false)),
        })
    }
}

impl WithError for RocksDbStoreInternal {
    type Error = RocksDbStoreInternalError;
}

impl ReadableKeyValueStore for RocksDbStoreInternal {
    const MAX_KEY_SIZE: usize = MAX_KEY_SIZE;

    fn max_stream_queries(&self) -> usize {
        self.max_stream_queries
    }

    async fn read_value_bytes(
        &self,
        key: &[u8],
    ) -> Result<Option<Vec<u8>>, RocksDbStoreInternalError> {
        check_key_size(key)?;
        let db = self.executor.db.clone();
        let mut full_key = self.executor.start_key.to_vec();
        full_key.extend(key);
        self.spawn_mode
            .spawn(move |x| Ok(db.get(&x)?), full_key)
            .await
    }

    async fn contains_key(&self, key: &[u8]) -> Result<bool, RocksDbStoreInternalError> {
        check_key_size(key)?;
        let db = self.executor.db.clone();
        let mut full_key = self.executor.start_key.to_vec();
        full_key.extend(key);
        self.spawn_mode
            .spawn(
                move |x| {
                    if !db.key_may_exist(&x) {
                        return Ok(false);
                    }
                    Ok(db.get(&x)?.is_some())
                },
                full_key,
            )
            .await
    }

    async fn contains_keys(
        &self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<bool>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        self.spawn_mode
            .spawn(move |x| executor.contains_keys_internal(x), keys)
            .await
    }

    async fn read_multi_values_bytes(
        &self,
        keys: Vec<Vec<u8>>,
    ) -> Result<Vec<Option<Vec<u8>>>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        self.spawn_mode
            .spawn(move |x| executor.read_multi_values_bytes_internal(x), keys)
            .await
    }

    async fn find_keys_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<Vec<u8>>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        let key_prefix = key_prefix.to_vec();
        self.spawn_mode
            .spawn(
                move |x| executor.find_keys_by_prefix_internal(x),
                key_prefix,
            )
            .await
    }

    async fn find_key_values_by_prefix(
        &self,
        key_prefix: &[u8],
    ) -> Result<Vec<(Vec<u8>, Vec<u8>)>, RocksDbStoreInternalError> {
        let executor = self.executor.clone();
        let key_prefix = key_prefix.to_vec();
        self.spawn_mode
            .spawn(
                move |x| executor.find_key_values_by_prefix_internal(x),
                key_prefix,
            )
            .await
    }
}

impl WritableKeyValueStore for RocksDbStoreInternal {
    const MAX_VALUE_SIZE: usize = MAX_VALUE_SIZE;

    async fn write_batch(&self, batch: Batch) -> Result<(), RocksDbStoreInternalError> {
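        // `fetch_or(true)` returns the previous value, so the root-key marker is
        // written at most once per store handle, on its first batch.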
        let write_root_key = !self.root_key_written.fetch_or(true, Ordering::SeqCst);
        let executor = self.executor.clone();
        self.spawn_mode
            .spawn(
                move |x| executor.write_batch_internal(x, write_root_key),
                batch,
            )
            .await
    }

    async fn clear_journal(&self) -> Result<(), RocksDbStoreInternalError> {
        Ok(())
    }
}

impl AdminKeyValueStore for RocksDbStoreInternal {
    type Config = RocksDbStoreInternalConfig;

    fn get_name() -> String {
        "rocksdb internal".to_string()
    }

    async fn connect(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<Self, RocksDbStoreInternalError> {
        let start_key = ROOT_KEY_DOMAIN.to_vec();
        RocksDbStoreInternal::build(config, namespace, start_key)
    }

    fn open_exclusive(&self, root_key: &[u8]) -> Result<Self, RocksDbStoreInternalError> {
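        // Reuse the same underlying RocksDB instance; only the key prefix
        // (`ROOT_KEY_DOMAIN ++ root_key`) and the root-key-written flag change.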
        let mut store = self.clone();
        let mut start_key = ROOT_KEY_DOMAIN.to_vec();
        start_key.extend(root_key);
        store.executor.start_key = start_key;
        store.root_key_written = Arc::new(AtomicBool::new(false));
        Ok(store)
    }

    async fn list_all(config: &Self::Config) -> Result<Vec<String>, RocksDbStoreInternalError> {
        let entries = std::fs::read_dir(config.path_with_guard.path_buf.clone())?;
        let mut namespaces = Vec::new();
        for entry in entries {
            let entry = entry?;
            if !entry.file_type()?.is_dir() {
                return Err(RocksDbStoreInternalError::NonDirectoryNamespace);
            }
            let namespace = match entry.file_name().into_string() {
                Err(error) => {
                    return Err(RocksDbStoreInternalError::IntoStringError(error));
                }
                Ok(namespace) => namespace,
            };
            namespaces.push(namespace);
        }
        Ok(namespaces)
    }

    async fn list_root_keys(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<Vec<Vec<u8>>, RocksDbStoreInternalError> {
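        // Open the store over the `STORED_ROOT_KEYS_PREFIX` domain, where a marker is
        // recorded for every root key that has been written to.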
        let start_key = vec![STORED_ROOT_KEYS_PREFIX];
        let store = RocksDbStoreInternal::build(config, namespace, start_key)?;
        store.find_keys_by_prefix(&[]).await
    }

    async fn delete_all(config: &Self::Config) -> Result<(), RocksDbStoreInternalError> {
        let namespaces = RocksDbStoreInternal::list_all(config).await?;
        for namespace in namespaces {
            let mut path_buf = config.path_with_guard.path_buf.clone();
            path_buf.push(&namespace);
            std::fs::remove_dir_all(path_buf.as_path())?;
        }
        Ok(())
    }

    async fn exists(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<bool, RocksDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let mut path_buf = config.path_with_guard.path_buf.clone();
        path_buf.push(namespace);
        let test = std::path::Path::exists(&path_buf);
        Ok(test)
    }

    async fn create(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<(), RocksDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let mut path_buf = config.path_with_guard.path_buf.clone();
        path_buf.push(namespace);
        if std::path::Path::exists(&path_buf) {
            return Err(RocksDbStoreInternalError::StoreAlreadyExists);
        }
        std::fs::create_dir_all(path_buf)?;
        Ok(())
    }

    async fn delete(
        config: &Self::Config,
        namespace: &str,
    ) -> Result<(), RocksDbStoreInternalError> {
        Self::check_namespace(namespace)?;
        let mut path_buf = config.path_with_guard.path_buf.clone();
        path_buf.push(namespace);
        let path = path_buf.as_path();
        std::fs::remove_dir_all(path)?;
        Ok(())
    }
}

#[cfg(with_testing)]
impl TestKeyValueStore for RocksDbStoreInternal {
    async fn new_test_config() -> Result<RocksDbStoreInternalConfig, RocksDbStoreInternalError> {
        let path_with_guard = PathWithGuard::new_testing();
        let spawn_mode = RocksDbSpawnMode::get_spawn_mode_from_runtime();
        let max_stream_queries = TEST_ROCKS_DB_MAX_STREAM_QUERIES;
        Ok(RocksDbStoreInternalConfig {
            path_with_guard,
            spawn_mode,
            max_stream_queries,
        })
    }
}

/// The error type for [`RocksDbStoreInternal`]
#[derive(Error, Debug)]
pub enum RocksDbStoreInternalError {
    /// Store already exists
    #[error("Store already exists")]
    StoreAlreadyExists,

    /// Tokio join error in RocksDB.
    #[error("tokio join error: {0}")]
    TokioJoinError(#[from] tokio::task::JoinError),

    /// RocksDB error.
    #[error("RocksDB error: {0}")]
    RocksDb(#[from] rocksdb::Error),

    /// The database contains a file which is not a directory
    #[error("Namespaces should be directories")]
    NonDirectoryNamespace,

    /// Error converting `OsString` to `String`
    #[error("error in the conversion from OsString: {0:?}")]
    IntoStringError(OsString),

    /// The key size must be at most 8 MiB
    #[error("The key size must be at most 8 MiB")]
    KeyTooLong,

    /// Namespace contains forbidden characters
    #[error("Namespace contains forbidden characters")]
    InvalidNamespace,

    /// Filesystem error
    #[error("Filesystem error: {0}")]
    FsError(#[from] std::io::Error),

    /// BCS serialization error.
    #[error(transparent)]
    BcsError(#[from] bcs::Error),
}

/// A path and the guard for the temporary directory if needed
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PathWithGuard {
    /// The path to the data
    pub path_buf: PathBuf,
    /// The guard for the directory if one is needed
    #[serde(skip)]
    _dir: Option<Arc<TempDir>>,
}

impl PathWithGuard {
    /// Creates a `PathWithGuard` from an existing path.
    pub fn new(path_buf: PathBuf) -> Self {
        Self {
            path_buf,
            _dir: None,
        }
    }

    /// Returns a `PathWithGuard` for testing, backed by a fresh temporary directory.
    #[cfg(with_testing)]
    fn new_testing() -> PathWithGuard {
        let dir = TempDir::new().unwrap();
        let path_buf = dir.path().to_path_buf();
        let _dir = Some(Arc::new(dir));
        PathWithGuard { path_buf, _dir }
    }
}

impl PartialEq for PathWithGuard {
    fn eq(&self, other: &Self) -> bool {
        self.path_buf == other.path_buf
    }
}
impl Eq for PathWithGuard {}

impl KeyValueStoreError for RocksDbStoreInternalError {
    const BACKEND: &'static str = "rocks_db";
}

/// The `RocksDbStore` composed type with metrics
#[cfg(with_metrics)]
pub type RocksDbStore = MeteredStore<
    LruCachingStore<MeteredStore<ValueSplittingStore<MeteredStore<RocksDbStoreInternal>>>>,
>;

/// The `RocksDbStore` composed type
#[cfg(not(with_metrics))]
pub type RocksDbStore = LruCachingStore<ValueSplittingStore<RocksDbStoreInternal>>;

/// The composed error type for the `RocksDbStore`
pub type RocksDbStoreError = ValueSplittingError<RocksDbStoreInternalError>;

/// The composed config type for the `RocksDbStore`
pub type RocksDbStoreConfig = LruCachingConfig<RocksDbStoreInternalConfig>;