don't require db lock on connect / cache.get

phiresky 2023-06-04 18:11:18 +02:00
parent f5285ac5cd
commit 58345767dc
4 changed files with 24 additions and 14 deletions

exampledir/exif.png: new binary file (1.9 MiB, binary content not shown)

Next changed file:

@@ -1,6 +1,6 @@
 use std::{future::Future, pin::Pin};
-use anyhow::Result;
+use anyhow::{Context, Result};
 use async_compression::tokio::write::ZstdEncoder;
 use async_stream::stream;
@@ -64,7 +64,7 @@ pub fn async_read_and_write_to_cache<'a>(
 };
 // EOF, finish!
-on_finish(finish).await
+on_finish(finish).await.context("write_to_cache on_finish")
 .map_err(to_io_err)?;
 };
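
For reference, a minimal standalone sketch of the anyhow::Context pattern these hunks apply (the function name, path, and message below are illustrative, not taken from the repo):

// Sketch: .context / .with_context prepend a label to the error chain,
// so the reported error names the step that failed.
use anyhow::{Context, Result};

fn read_cache_file(path: &std::path::Path) -> Result<Vec<u8>> {
    std::fs::read(path)
        .with_context(|| format!("reading cache file {}", path.display()))
}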

Next changed file:

@@ -151,7 +151,7 @@ async fn adapt_caching(
 let mut cache = cache.context("No cache?")?;
 let cache_key = CacheKey::new(&ai.filepath_hint, adapter.as_ref(), &active_adapters)?;
 // let dbg_ctx = format!("adapter {}", &adapter.metadata().name);
-let cached = cache.get(&cache_key).await?;
+let cached = cache.get(&cache_key).await.context("cache.get")?;
 match cached {
 Some(cached) => Ok(Box::pin(ZstdDecoder::new(Cursor::new(cached)))),
 None => {

Next changed file:

@@ -49,7 +49,7 @@ pub trait PreprocCache {
 async fn set(&mut self, key: &CacheKey, value: Vec<u8>) -> Result<()>;
 }
-async fn pragmas(db: &Connection) -> Result<()> {
+async fn connect_pragmas(db: &Connection) -> Result<()> {
 // https://phiresky.github.io/blog/2020/sqlite-performance-tuning/
 //let want_page_size = 32768;
 //db.execute(&format!("pragma page_size = {};", want_page_size))
@@ -63,9 +63,6 @@ async fn pragmas(db: &Connection) -> Result<()> {
 pragma synchronous = off; -- integrity isn't very important here
 pragma mmap_size = 30000000000;
-pragma application_id = 924716026;
-pragma user_version = 2; -- todo: on upgrade clear db if version is unexpected
 create table if not exists preproc_cache (
 adapter text not null,
 adapter_version integer not null,
@@ -80,23 +77,36 @@ async fn pragmas(db: &Connection) -> Result<()> {
 ",
 )
 })
-.await?;
-/*let jm: String = db
-.call(|db| db.pragma_query_value(None, "journal_mode", |r| r.get(0))?)
+.await.context("connect_pragmas")?;
+let jm: i64 = db
+.call(|db| db.pragma_query_value(None, "application_id", |r| r.get(0)))
 .await?;
-if &jm != "wal" {
-anyhow::bail!("journal mode is not wal");
-}*/
+if jm != 924716026 {
+// (probably) newly created db
+create_pragmas(db).await.context("create_pragmas")?;
+}
 Ok(())
 }
+async fn create_pragmas(db: &Connection) -> Result<()> {
+db.call(|db| {
+db.execute_batch(
+"
+pragma application_id = 924716026;
+pragma user_version = 2; -- todo: on upgrade clear db if version is unexpected
+",
+)
+})
+.await?;
+Ok(())
+}
 struct SqliteCache {
 db: Connection,
 }
 impl SqliteCache {
 async fn new(path: &Path) -> Result<SqliteCache> {
 let db = Connection::open(path.join("cache.sqlite3")).await?;
-pragmas(&db).await?;
+connect_pragmas(&db).await?;
 Ok(SqliteCache { db })
 }
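
Condensed, the intent of the cache change as a sketch only (blocking rusqlite here instead of the async tokio-rusqlite wrapper the code actually goes through; open_cache is an illustrative name, not from the repo): the pragmas that need a write lock on the database file (application_id, user_version) now run only when the application_id check indicates a freshly created database, so opening an existing cache and serving cache.get no longer requires that lock.

// Sketch of the connect-time vs. create-time pragma split.
use anyhow::{Context, Result};
use rusqlite::Connection;

const APPLICATION_ID: i64 = 924716026;

fn open_cache(dir: &std::path::Path) -> Result<Connection> {
    let db = Connection::open(dir.join("cache.sqlite3"))?;
    // Per-connection tuning; these pragmas don't write to the file.
    db.execute_batch(
        "pragma synchronous = off;
         pragma mmap_size = 30000000000;",
    )
    .context("connect pragmas")?;
    // application_id reads as 0 on a database that was just created,
    // so it doubles as a "has this file been initialized?" flag.
    let id: i64 = db.pragma_query_value(None, "application_id", |r| r.get(0))?;
    if id != APPLICATION_ID {
        // One-time setup that does take a write lock.
        db.execute_batch(&format!(
            "pragma application_id = {APPLICATION_ID};
             pragma user_version = 2;"
        ))
        .context("create pragmas")?;
    }
    Ok(db)
}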