Replace println/dbg calls with corresponding macros from tracing crate
parent 3930fd08d7
commit 2a0515f528
5 changed files with 68 additions and 88 deletions
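The pattern applied throughout: unconditional stdout/stderr output (`println!`, `eprintln!`, `dbg!`) becomes leveled, structured `tracing` events that a subscriber can filter and route. A minimal standalone sketch of the before/after (not code from this repository; assumes `tracing` and `tracing-subscriber` as dependencies):

```rust
use tracing::{error, info};

fn create_user(user: &str) {
    // Before: println!("Migration: Creating user {}", user);
    // After: a leveled event; `?user` captures the value as a structured
    // field using its Debug implementation.
    info!(?user, "Migration: creating user");
}

fn main() {
    // A subscriber decides which events are rendered, and how.
    tracing_subscriber::fmt().init();
    create_user("@alice:example.org");
    error!(max_request_size = 512u64, "Max request size is less than 1KB");
}
```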
@@ -135,7 +135,6 @@ type TupleOfBytes = (Vec<u8>, Vec<u8>);
 impl SqliteTable {
     fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        //dbg!(&self.name);
         Ok(guard
             .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
             .query_row([key], |row| row.get(0))
@@ -143,7 +142,6 @@ impl SqliteTable {
     }

     fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
-        //dbg!(&self.name);
         guard.execute(
             format!(
                 "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",
@@ -176,10 +174,7 @@ impl SqliteTable {
             statement
                 .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(move |r| {
-                    //dbg!(&name);
-                    r.unwrap()
-                }),
+                .map(move |r| r.unwrap()),
         );

         Box::new(PreparedStatementIterator {
@@ -276,10 +271,7 @@ impl KvTree for SqliteTable {
             statement
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(move |r| {
-                    //dbg!(&name);
-                    r.unwrap()
-                }),
+                .map(move |r| r.unwrap()),
         );
         Box::new(PreparedStatementIterator {
             iterator,
@@ -301,10 +293,7 @@ impl KvTree for SqliteTable {
             statement
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(move |r| {
-                    //dbg!(&name);
-                    r.unwrap()
-                }),
+                .map(move |r| r.unwrap()),
         );

         Box::new(PreparedStatementIterator {
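The commented-out `dbg!` calls deleted in the hunks above sat in per-row iterator paths. `dbg!` writes to stderr unconditionally, with file and line, and cannot be filtered at runtime, so it has no place in a server hot path. A `tracing` equivalent, had one been wanted, might look like this (a sketch with hypothetical names, not part of the commit):

```rust
use tracing::debug;

fn trace_lookup(name: &str, key: &[u8]) {
    // Unlike dbg!(&name), this is a no-op unless a subscriber enables
    // the debug level for this module.
    debug!(table = ?name, key_len = key.len(), "sqlite lookup");
}

fn main() {
    tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
    trace_lookup("userid_password", b"@alice:example.org");
}
```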
@@ -258,7 +258,7 @@ impl KeyValueDatabase {
         };

         if config.max_request_size < 1024 {
-            eprintln!("ERROR: Max request size is less than 1KB. Please increase it.");
+            error!(?config.max_request_size, "Max request size is less than 1KB. Please increase it.");
         }

         let db_raw = Box::new(Self {
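`error!(?config.max_request_size, …)` uses `tracing`'s field-capture shorthand: `?expr` records the value via its `Debug` impl, with the expression path as the field name. A standalone sketch of the three recording forms (values are made up):

```rust
use tracing::info;

struct Config {
    max_request_size: u64,
}

fn main() {
    tracing_subscriber::fmt().init();
    let config = Config { max_request_size: 512 };
    // `?x` records via Debug, `%x` via Display, `name = value` directly.
    info!(?config.max_request_size, addr = %"127.0.0.1", healthy = true, "startup check");
}
```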
@@ -483,7 +483,7 @@ impl KeyValueDatabase {
         for user in services().rooms.state_cache.room_members(&room?) {
             let user = user?;
             if user.server_name() != services().globals.server_name() {
-                println!("Migration: Creating user {}", user);
+                info!(?user, "Migration: creating user");
                 services().users.create(&user, None)?;
             }
         }
@@ -545,7 +545,6 @@ impl KeyValueDatabase {
              current_state: HashSet<_>,
              last_roomstates: &mut HashMap<_, _>| {
                 counter += 1;
-                println!("counter: {}", counter);
                 let last_roomsstatehash = last_roomstates.get(current_room);

                 let states_parents = last_roomsstatehash.map_or_else(
@@ -742,15 +741,13 @@ impl KeyValueDatabase {
                     new_key.extend_from_slice(word);
                     new_key.push(0xff);
                     new_key.extend_from_slice(pdu_id_count);
-                    println!("old {:?}", key);
-                    println!("new {:?}", new_key);
                     Some((new_key, Vec::new()))
                 })
                 .peekable();

             while iter.peek().is_some() {
                 db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?;
-                println!("smaller batch done");
+                debug!("Inserted smaller batch");
             }

             info!("Deleting starts");
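The migration above drains a rebuilt key stream in bounded batches: `peek()` checks for remaining items and `by_ref().take(1000)` borrows the iterator so each pass consumes at most 1000 entries without ending the loop. A standalone sketch of that pattern, with a plain range standing in for the database iterator:

```rust
fn main() {
    let mut iter = (0..2500).peekable();
    while iter.peek().is_some() {
        // by_ref() lets take(1000) borrow rather than consume the
        // iterator, so the next pass picks up where this one stopped.
        let batch: Vec<u32> = iter.by_ref().take(1000).collect();
        println!("inserted batch of {}", batch.len());
    }
}
```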
@@ -760,7 +757,6 @@ impl KeyValueDatabase {
                 .iter()
                 .filter_map(|(key, _)| {
                     if key.starts_with(b"!") {
-                        println!("del {:?}", key);
                         Some(key)
                     } else {
                         None
@@ -769,7 +765,6 @@ impl KeyValueDatabase {
                 .collect();

             for key in batch2 {
-                println!("del");
                 db.tokenids.remove(&key)?;
             }
@@ -945,7 +940,6 @@ impl KeyValueDatabase {

         #[cfg(unix)]
         use tokio::signal::unix::{signal, SignalKind};
-        use tracing::info;

         use std::time::{Duration, Instant};
@@ -961,23 +955,23 @@ impl KeyValueDatabase {
             #[cfg(unix)]
             tokio::select! {
                 _ = i.tick() => {
-                    info!("cleanup: Timer ticked");
+                    debug!("cleanup: Timer ticked");
                 }
                 _ = s.recv() => {
-                    info!("cleanup: Received SIGHUP");
+                    debug!("cleanup: Received SIGHUP");
                 }
             };
             #[cfg(not(unix))]
             {
                 i.tick().await;
-                info!("cleanup: Timer ticked")
+                debug!("cleanup: Timer ticked")
             }

             let start = Instant::now();
             if let Err(e) = services().globals.cleanup() {
                 error!("cleanup: Errored: {}", e);
             } else {
-                info!("cleanup: Finished in {:?}", start.elapsed());
+                debug!("cleanup: Finished in {:?}", start.elapsed());
             }
         }
     });
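The periodic cleanup messages drop from `info!` to `debug!`: they vanish from default output but stay reachable through the subscriber's filter. A sketch of re-enabling them with an `EnvFilter` directive (the directive strings are illustrative):

```rust
use tracing::debug;
use tracing_subscriber::EnvFilter;

fn main() {
    // "debug" enables everything; a scoped directive such as
    // "info,conduit::database=debug" would enable it per module. The same
    // syntax works via RUST_LOG with EnvFilter::from_default_env().
    let filter = EnvFilter::new("debug");
    tracing_subscriber::fmt().with_env_filter(filter).init();
    debug!("cleanup: Timer ticked");
}
```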
src/main.rs (78 changed lines)
@@ -40,7 +40,7 @@ use tower_http::{
     trace::TraceLayer,
     ServiceBuilderExt as _,
 };
-use tracing::{info, warn};
+use tracing::{error, info, warn};
 use tracing_subscriber::{prelude::*, EnvFilter};

 pub use conduit::*; // Re-export everything from the library crate
@@ -68,27 +68,16 @@ async fn main() {
     let config = match raw_config.extract::<Config>() {
         Ok(s) => s,
         Err(e) => {
-            eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e);
+            eprintln!(
+                "It looks like your config is invalid. The following error occurred: {}",
+                e
+            );
             std::process::exit(1);
         }
     };

     config.warn_deprecated();

-    if let Err(e) = KeyValueDatabase::load_or_create(config).await {
-        eprintln!(
-            "The database couldn't be loaded or created. The following error occured: {}",
-            e
-        );
-        std::process::exit(1);
-    };
-
-    let config = &services().globals.config;
-
-    let start = async {
-        run_server().await.unwrap();
-    };
-
     if config.allow_jaeger {
         opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new());
         let tracer = opentelemetry_jaeger::new_agent_pipeline()
@@ -113,35 +102,44 @@ async fn main() {
             .with(filter_layer)
             .with(telemetry);
         tracing::subscriber::set_global_default(subscriber).unwrap();
-        start.await;
-        println!("exporting remaining spans");
-        opentelemetry::global::shutdown_tracer_provider();
-    } else if config.tracing_flame {
-        let registry = tracing_subscriber::Registry::default();
-        let (flame_layer, _guard) =
-            tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap();
-        let flame_layer = flame_layer.with_empty_samples(false);
+    } else {
+        let registry = tracing_subscriber::Registry::default();
+        if config.tracing_flame {
+            let (flame_layer, _guard) =
+                tracing_flame::FlameLayer::with_file("./tracing.folded").unwrap();
+            let flame_layer = flame_layer.with_empty_samples(false);

-        let filter_layer = EnvFilter::new("trace,h2=off");
+            let filter_layer = EnvFilter::new("trace,h2=off");

-        let subscriber = registry.with(filter_layer).with(flame_layer);
-        tracing::subscriber::set_global_default(subscriber).unwrap();
-        start.await;
-    } else {
-        let registry = tracing_subscriber::Registry::default();
-        let fmt_layer = tracing_subscriber::fmt::Layer::new();
-        let filter_layer = match EnvFilter::try_new(&config.log) {
-            Ok(s) => s,
-            Err(e) => {
-                eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e);
-                EnvFilter::try_new("warn").unwrap()
-            }
-        };
+            let subscriber = registry.with(filter_layer).with(flame_layer);
+            tracing::subscriber::set_global_default(subscriber).unwrap();
+        } else {
+            let fmt_layer = tracing_subscriber::fmt::Layer::new();
+            let filter_layer = match EnvFilter::try_new(&config.log) {
+                Ok(s) => s,
+                Err(e) => {
+                    eprintln!("It looks like your log config is invalid. The following error occurred: {}", e);
+                    EnvFilter::try_new("warn").unwrap()
+                }
+            };

-        let subscriber = registry.with(filter_layer).with(fmt_layer);
-        tracing::subscriber::set_global_default(subscriber).unwrap();
-        start.await;
+            let subscriber = registry.with(filter_layer).with(fmt_layer);
+            tracing::subscriber::set_global_default(subscriber).unwrap();
+        }
     }
+
+    info!("Loading database");
+    if let Err(error) = KeyValueDatabase::load_or_create(config).await {
+        error!(?error, "The database couldn't be loaded or created");
+        std::process::exit(1);
+    };
+    let config = &services().globals.config;
+
+    info!("Starting server");
+    run_server().await.unwrap();
+
+    if config.allow_jaeger {
+        opentelemetry::global::shutdown_tracer_provider();
+    }
 }
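The restructured `main` builds one `Registry` and attaches whichever layers apply, instead of repeating the registry-plus-`start.await` dance in each branch; database loading and `run_server()` then happen after a subscriber is installed, so the new `info!`/`error!` calls are actually captured. A reduced sketch of the layering idea (jaeger and flame branches omitted):

```rust
use tracing_subscriber::{prelude::*, EnvFilter, Registry};

fn main() {
    let registry = Registry::default();
    let fmt_layer = tracing_subscriber::fmt::Layer::new();
    let filter_layer =
        EnvFilter::try_new("info").unwrap_or_else(|_| EnvFilter::try_new("warn").unwrap());

    let subscriber = registry.with(filter_layer).with(fmt_layer);
    tracing::subscriber::set_global_default(subscriber).unwrap();

    tracing::info!("Loading database"); // captured now that a subscriber exists
}
```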
@@ -6,7 +6,7 @@ use std::{

 pub use data::Data;
 use ruma::{api::client::error::ErrorKind, EventId, RoomId};
-use tracing::log::warn;
+use tracing::{debug, error, warn};

 use crate::{services, Error, Result};
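A subtle point in this import change: `tracing::log::warn!` is the `log` crate's macro, re-exported when `tracing`'s `log` feature is enabled, so the old calls bypassed structured fields entirely; `tracing::warn!` emits a native tracing event. A sketch of the native form (hypothetical value):

```rust
use tracing::warn;

fn main() {
    tracing_subscriber::fmt().init();
    let event_id = "$abc:example.org"; // hypothetical event ID
    warn!(?event_id, "Could not find pdu mentioned in auth events");
}
```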
@@ -88,10 +88,10 @@ impl Service {
                         .rooms
                         .auth_chain
                         .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?;
-                    println!(
-                        "cache missed event {} with auth chain len {}",
-                        event_id,
-                        auth_chain.len()
+                    debug!(
+                        event_id = ?event_id,
+                        chain_length = ?auth_chain.len(),
+                        "Cache missed event"
                     );
                     chunk_cache.extend(auth_chain.iter());
@@ -101,11 +101,11 @@ impl Service {
                     }
                 };
             }
-            println!(
-                "chunk missed with len {}, event hits2: {}, misses2: {}",
-                chunk_cache.len(),
-                hits2,
-                misses2
+            debug!(
+                chunk_cache_length = ?chunk_cache.len(),
+                hits = ?hits2,
+                misses = ?misses2,
+                "Chunk missed",
             );
             let chunk_cache = Arc::new(chunk_cache);
             services()
@@ -115,11 +115,11 @@ impl Service {
             full_auth_chain.extend(chunk_cache.iter());
         }

-        println!(
-            "total: {}, chunk hits: {}, misses: {}",
-            full_auth_chain.len(),
-            hits,
-            misses
+        debug!(
+            chain_length = ?full_auth_chain.len(),
+            hits = ?hits,
+            misses = ?misses,
+            "Auth chain stats",
         );

         Ok(full_auth_chain
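Rather than interpolating all three counters into one format string, the rewritten events give each statistic its own named field, so structured consumers (a JSON formatter, Jaeger spans) can read them individually; the default fmt layer renders them as `chain_length=… hits=… misses=…`. The same call in isolation (a sketch with assumed types):

```rust
use tracing::debug;

fn report(full_auth_chain: &[u64], hits: usize, misses: usize) {
    debug!(
        chain_length = ?full_auth_chain.len(),
        hits = ?hits,
        misses = ?misses,
        "Auth chain stats",
    );
}

fn main() {
    tracing_subscriber::fmt().with_max_level(tracing::Level::DEBUG).init();
    report(&[1, 2, 3], 10, 2);
}
```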
@@ -151,10 +151,10 @@ impl Service {
                     }
                 }
                 Ok(None) => {
-                    warn!("Could not find pdu mentioned in auth events: {}", event_id);
+                    warn!(?event_id, "Could not find pdu mentioned in auth events");
                 }
-                Err(e) => {
-                    warn!("Could not load event in auth chain: {} {}", event_id, e);
+                Err(error) => {
+                    error!(?event_id, ?error, "Could not load event in auth chain");
                 }
             }
         }
|
@ -839,8 +839,8 @@ impl Service {
|
|||
info!("Preparing for stateres to derive new room state");
|
||||
let mut extremity_sstatehashes = HashMap::new();
|
||||
|
||||
info!("Loading extremities");
|
||||
for id in dbg!(&extremities) {
|
||||
info!(?extremities, "Loading extremities");
|
||||
for id in &extremities {
|
||||
match services().rooms.timeline.get_pdu(id)? {
|
||||
Some(leaf_pdu) => {
|
||||
extremity_sstatehashes.insert(
|
||||
|
@@ -1273,7 +1273,6 @@ impl Service {
                 // This return value is the key used for sorting events,
                 // events are then sorted by power level, time,
                 // and lexically by event_id.
-                println!("{}", event_id);
                 Ok((
                     int!(0),
                     MilliSecondsSinceUnixEpoch(