propagate additional errors from db options

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk 2024-08-10 20:14:26 +00:00
parent 2cf472a69b
commit f540bed61e
2 changed files with 25 additions and 14 deletions
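
In short: cache_size() now returns Result<usize> instead of panicking on overflow, every call site in cf_options forwards the failure with ?, and the backup paths swap bare unwrap() for descriptive expect() messages. A minimal sketch of the same propagation pattern follows; it uses a plain illustrative error type rather than conduit's err! macro and its Config/Result types, and the names here are not from the real codebase:

    // Sketch of the pattern this commit adopts in cache_size(): checked
    // arithmetic mapped into a Result instead of an .expect() panic, then
    // forwarded with `?` by the caller. `ConfigError` is illustrative only.
    #[derive(Debug)]
    struct ConfigError(&'static str);

    fn cache_size(base_size: u32, modifier: f64, entity_size: usize) -> Result<usize, ConfigError> {
        let ents = (f64::from(base_size) * modifier) as usize;
        ents.checked_mul(entity_size)
            // formerly: .expect("cache capacity size is too large")
            .ok_or(ConfigError("cache_capacity_modifier: cache size is too large"))
    }

    fn configure(capacity: u32, modifier: f64) -> Result<(), ConfigError> {
        let bytes = cache_size(capacity, modifier, 64)?; // `?` propagates the error upward
        println!("cache sized to {bytes} bytes");
        Ok(())
    }

    fn main() {
        // An oversized modifier now surfaces as an error instead of aborting the process.
        if let Err(e) = configure(u32::MAX, f64::MAX) {
            eprintln!("bad config: {e:?}");
        }
    }

With this in place, a misconfigured cache_capacity_modifier is reported through the normal error path at startup rather than crashing the server.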


@@ -49,7 +49,12 @@ impl Engine {
 		let mut db_env = Env::new().or_else(or_else)?;
 		let row_cache = Cache::new_lru_cache(row_cache_capacity_bytes);
-		let db_opts = db_options(config, &mut db_env, &row_cache, col_cache.get("primary").expect("cache"))?;
+		let db_opts = db_options(
+			config,
+			&mut db_env,
+			&row_cache,
+			col_cache.get("primary").expect("primary cache exists"),
+		)?;
 		let load_time = std::time::Instant::now();
 		if config.rocksdb_repair {
@@ -178,7 +183,7 @@ impl Engine {
 			return Ok(());
 		}
-		let options = BackupEngineOptions::new(path.unwrap())?;
+		let options = BackupEngineOptions::new(path.expect("valid database backup path"))?;
 		let mut engine = BackupEngine::open(&options, &self.env)?;
 		if config.database_backups_to_keep > 0 {
 			if let Err(e) = engine.create_new_backup_flush(&self.db, true) {
@@ -186,7 +191,7 @@ impl Engine {
 			}
 			let engine_info = engine.get_backup_info();
-			let info = &engine_info.last().unwrap();
+			let info = &engine_info.last().expect("backup engine info is not empty");
 			info!(
 				"Created database backup #{} using {} bytes in {} files",
 				info.backup_id, info.size, info.num_files,
@@ -196,7 +201,7 @@ impl Engine {
 		if config.database_backups_to_keep >= 0 {
 			let keep = u32::try_from(config.database_backups_to_keep)?;
 			if let Err(e) = engine.purge_old_backups(keep.try_into()?) {
-				error!("Failed to purge old backup: {:?}", e.to_string());
+				error!("Failed to purge old backup: {e:?}");
 			}
 		}
@@ -213,7 +218,7 @@ impl Engine {
 		}
 		let mut res = String::new();
-		let options = BackupEngineOptions::new(path.expect("valid path")).or_else(or_else)?;
+		let options = BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?;
 		let engine = BackupEngine::open(&options, &self.env).or_else(or_else)?;
 		for info in engine.get_backup_info() {
 			writeln!(


@@ -1,6 +1,6 @@
 use std::{cmp, collections::HashMap, convert::TryFrom};
-use conduit::{utils, Config, Result};
+use conduit::{err, utils, Config, Result};
 use rocksdb::{
 	statistics::StatsLevel, BlockBasedOptions, Cache, DBCompactionStyle, DBCompressionType, DBRecoveryMode, Env,
 	LogLevel, Options, UniversalCompactOptions, UniversalCompactionStopStyle,
@@ -125,7 +125,7 @@ pub(crate) fn cf_options(
 			cfg,
 			cache,
 			name,
-			cache_size(cfg, cfg.shorteventid_cache_capacity, 64),
+			cache_size(cfg, cfg.shorteventid_cache_capacity, 64)?,
 		),
 		"eventid_shorteventid" => set_table_with_new_cache(
@@ -133,11 +133,17 @@ pub(crate) fn cf_options(
 			cfg,
 			cache,
 			name,
-			cache_size(cfg, cfg.eventidshort_cache_capacity, 64),
+			cache_size(cfg, cfg.eventidshort_cache_capacity, 64)?,
 		),
 		"shorteventid_authchain" => {
-			set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.auth_chain_cache_capacity, 192));
+			set_table_with_new_cache(
+				&mut opts,
+				cfg,
+				cache,
+				name,
+				cache_size(cfg, cfg.auth_chain_cache_capacity, 192)?,
+			);
 		},
 		"shortstatekey_statekey" => set_table_with_new_cache(
@@ -145,7 +151,7 @@ pub(crate) fn cf_options(
 			cfg,
 			cache,
 			name,
-			cache_size(cfg, cfg.shortstatekey_cache_capacity, 1024),
+			cache_size(cfg, cfg.shortstatekey_cache_capacity, 1024)?,
 		),
 		"statekey_shortstatekey" => set_table_with_new_cache(
@@ -153,11 +159,11 @@ pub(crate) fn cf_options(
 			cfg,
 			cache,
 			name,
-			cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024),
+			cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024)?,
 		),
 		"eventid_outlierpdu" => {
-			set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.pdu_cache_capacity, 1536));
+			set_table_with_new_cache(&mut opts, cfg, cache, name, cache_size(cfg, cfg.pdu_cache_capacity, 1536)?);
 		},
 		"pduid_pdu" => set_table_with_shared_cache(&mut opts, cfg, cache, name, "eventid_outlierpdu"),
@@ -321,13 +327,13 @@ fn set_table_with_shared_cache(
 	opts.set_block_based_table_factory(&table);
 }
-fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize {
+fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> Result<usize> {
 	let ents = f64::from(base_size) * config.cache_capacity_modifier;
 	#[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
 	(ents as usize)
 		.checked_mul(entity_size)
-		.expect("cache capacity size is too large")
+		.ok_or_else(|| err!(Config("cache_capacity_modifier", "Cache size is too large.")))
 }
 fn table_options(_config: &Config) -> BlockBasedOptions {