// ripgrep-all/src/preproc_cache.rs

use crate::{config::CacheConfig, print_bytes, print_dur};
use anyhow::{format_err, Context, Result};
use log::*;
use rkv::backend::{BackendEnvironmentBuilder, LmdbEnvironment};
use std::{fmt::Display, path::Path, time::Instant};

pub trait PreprocCache: Send + Sync {
    /*/// gets cache at specified key.
    /// if cache hit, return the resulting data
    /// else, run the given lambda, and store its result in the cache if present
    fn get_or_run<'a>(
        &mut self,
        db_name: &str,
        key: &[u8],
        debug_name: &str,
        runner: Box<dyn FnOnce() -> Result<Option<Vec<u8>>> + 'a>,
    ) -> Result<Option<Vec<u8>>>;*/
    fn get(&self, db_name: &str, key: &[u8]) -> Result<Option<Vec<u8>>>;
    fn set(&mut self, db_name: &str, key: &[u8], value: &[u8]) -> Result<()>;
}
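
/// Usage sketch (not part of the original file): how a caller is expected to
/// combine `get` and `set` into a get-or-compute-then-store pattern. The
/// `compute` closure stands in for rga's actual preprocessing step, which
/// lives elsewhere; the names here are illustrative only.
#[allow(dead_code)]
fn cached_or_compute(
    cache: &mut dyn PreprocCache,
    db_name: &str,
    key: &[u8],
    compute: impl FnOnce() -> Result<Vec<u8>>,
) -> Result<Vec<u8>> {
    // cache hit: return the stored bytes as-is
    if let Some(cached) = cache.get(db_name, key)? {
        return Ok(cached);
    }
    // cache miss: run the (expensive) computation, then remember the result
    let computed = compute()?;
    cache.set(db_name, key, &computed)?;
    Ok(computed)
}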
/// opens an LMDB cache
fn open_cache_db(
    path: &Path,
) -> Result<std::sync::Arc<std::sync::RwLock<rkv::Rkv<LmdbEnvironment>>>> {
    std::fs::create_dir_all(path)?;
    use rkv::backend::LmdbEnvironmentFlags;

    rkv::Manager::<LmdbEnvironment>::singleton()
        .write()
        .map_err(|_| format_err!("could not write cache db manager"))?
        .get_or_create(path, |p| {
            let mut builder = rkv::Rkv::environment_builder::<rkv::backend::Lmdb>();
            builder
                .set_flags(rkv::EnvironmentFlags::NO_SYNC)
                .set_flags(rkv::EnvironmentFlags::WRITE_MAP) // not durable cuz it's a cache
                // i'm not sure why NO_TLS is needed. otherwise LMDB transactions (open readers) will keep piling up until it fails with
                // LmdbError(ReadersFull). Those "open readers" stay even after the corresponding processes exit.
                // hope setting this doesn't break integrity
                .set_flags(rkv::EnvironmentFlags::NO_TLS)
                // sometimes, this seems to cause the data.mdb file to appear as 2GB in size (with holes), but sometimes not?
                .set_map_size(2 * 1024 * 1024 * 1024)
                .set_max_dbs(100)
                .set_max_readers(128);
            rkv::Rkv::from_builder(p, builder)
        })
        .map_err(|e| format_err!("could not get/create cache db: {}", e))
}

pub struct LmdbCache {
    db_arc: std::sync::Arc<std::sync::RwLock<rkv::Rkv<LmdbEnvironment>>>,
}

impl LmdbCache {
    pub fn open(config: &CacheConfig) -> Result<Option<LmdbCache>> {
        if config.disabled {
            return Ok(None);
        }
        let path = Path::new(&config.path.0);
        Ok(Some(LmdbCache {
            db_arc: open_cache_db(&path)?,
        }))
    }
}

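/// Newtype around `rkv::StoreError` so it gets `Display` + `std::error::Error`
/// impls here and can be attached as context to the `anyhow` errors below.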
#[derive(Debug)]
struct RkvErrWrap(rkv::StoreError);
impl Display for RkvErrWrap {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}
impl std::error::Error for RkvErrWrap {}

impl PreprocCache for LmdbCache {
    fn get(&self, db_name: &str, key: &[u8]) -> Result<Option<Vec<u8>>> {
        let start = Instant::now();
        let db_env = self
            .db_arc
            .read()
            .map_err(|_| anyhow::anyhow!("Could not open lock, some lock writer panicked"))?;
        let db = db_env
            .open_single(db_name, rkv::store::Options::create())
            .map_err(RkvErrWrap)
            .context("could not open cache db store")?;
        let reader = db_env.read().expect("could not get reader");
        let cached = db
            .get(&reader, &key)
            .map_err(RkvErrWrap)
            .context("could not read from db")?;
        match cached {
            Some(rkv::Value::Blob(cached)) => {
                debug!(
                    "cache HIT, reading {} (compressed) from cache",
                    print_bytes(cached.len() as f64)
                );
                debug!("reading from cache took {}", print_dur(start));
                Ok(Some(Vec::from(cached)))
            }
            Some(_) => Err(format_err!("Integrity: value not blob"))?,
            None => Ok(None),
        }
    }

    fn set(&mut self, db_name: &str, key: &[u8], got: &[u8]) -> Result<()> {
        let start = Instant::now();
        debug!("writing {} to cache", print_bytes(got.len() as f64));
        let db_env = self
            .db_arc
            .read()
            .map_err(|_| anyhow::anyhow!("Could not open lock, some lock writer panicked"))?;
        let db = db_env
            .open_single(db_name, rkv::store::Options::create())
            .map_err(RkvErrWrap)
            .context("could not open cache db store")?;
        let mut writer = db_env
            .write()
            .map_err(RkvErrWrap)
            .context("could not open write handle to cache")?;
        db.put(&mut writer, &key, &rkv::Value::Blob(&got))
            .map_err(RkvErrWrap)
            .context("could not write to cache")?;
        writer
            .commit()
            .map_err(RkvErrWrap)
            .context("could not commit write to cache")?;
        debug!("writing to cache took {}", print_dur(start));
        Ok(())
    }
}
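
// Illustrative only (not in the original file): a minimal in-memory `PreprocCache`
// plus a test exercising the `cached_or_compute` sketch above. The real cache used
// by rga is `LmdbCache`; this just demonstrates the intended trait contract.
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    /// Trivial in-memory cache keyed by (db_name, key).
    #[derive(Default)]
    struct MemCache {
        dbs: HashMap<(String, Vec<u8>), Vec<u8>>,
    }

    impl PreprocCache for MemCache {
        fn get(&self, db_name: &str, key: &[u8]) -> Result<Option<Vec<u8>>> {
            Ok(self.dbs.get(&(db_name.to_string(), key.to_vec())).cloned())
        }
        fn set(&mut self, db_name: &str, key: &[u8], value: &[u8]) -> Result<()> {
            self.dbs
                .insert((db_name.to_string(), key.to_vec()), value.to_vec());
            Ok(())
        }
    }

    #[test]
    fn get_or_compute_round_trip() -> Result<()> {
        let mut cache = MemCache::default();
        // first call: cache miss, so the closure runs and its result is stored
        let v = cached_or_compute(&mut cache, "adapter.v1", b"some/file.pdf", || {
            Ok(b"extracted text".to_vec())
        })?;
        assert_eq!(v, b"extracted text".to_vec());
        // second call: cache hit, so a failing closure must never be invoked
        let v = cached_or_compute(&mut cache, "adapter.v1", b"some/file.pdf", || {
            Err(format_err!("should have been served from cache"))
        })?;
        assert_eq!(v, b"extracted text".to_vec());
        Ok(())
    }
}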