improve scaling (now O(log n) instead of somewhere between O(1) and O(n))

parent cdd2b30eb0
commit 01aa158b03

4 changed files with 76 additions and 7 deletions

@@ -1,6 +1,6 @@
 [package]
 name = "microdb"
-version = "0.3.4"
+version = "0.3.5"
 edition = "2021"
 description = "A very small in-program database with cache, disk storage, etc."
 license = "MIT"

@@ -59,3 +59,6 @@ Done! Write took 5570ms: 0.557ms per write; Read took 143ms: 0.0143ms per read.
 As you can see, the per-operation time is quite negligible, and it actually turns out to be a lot faster
 than SQL databases like Postgres **for these kinds of dataset sizes**. This DB is not made to
 be used on datasets of giant sizes, but it works exceptionally well for smaller datasets.
+
+Currently, the DB scales at approximately O(log n) for reading, but is slower for writing
+(exactly how much slower has not been measured yet).

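The read bound comes from swapping the HashMap-backed allocation table and cache for a BTreeMap (see the source hunks further down). Below is a minimal standalone sketch of that effect; the Allocation fields and key layout are made up for illustration and are not microdb's real API.

// Minimal sketch, assuming simplified types: a BTreeMap index performs
// O(log n) comparisons per lookup, with no hashing and no degenerate
// collision behaviour (the "somewhere between O(1) and O(n)" the commit
// message refers to for HashMap).
use std::collections::BTreeMap;

// Hypothetical stand-in for the crate's Allocation record.
#[derive(Debug, Clone, Copy)]
struct Allocation {
    offset: usize,
    len: usize,
}

fn main() {
    let mut map: BTreeMap<String, Allocation> = BTreeMap::new();
    for i in 0..10_000_usize {
        // Keys mirror the benchmark's "horizontal_test/<i>" paths.
        map.insert(
            format!("horizontal_test/{i}"),
            Allocation { offset: i * 16, len: 16 },
        );
    }
    // Each lookup walks a balanced tree: O(log n) worst case, and iteration
    // visits keys in sorted order.
    assert!(map.get("horizontal_test/42").is_some());
}

BTreeMap inserts are O(log n) as well, which is one plausible reason the write path does not see the same benefit; the README note above deliberately hedges on how much slower writes are.
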
@@ -124,6 +124,50 @@ fn main() {
     );
     db.remove("test").unwrap();

+    println!("\nSetting horizontal_test/{{0..1000}} --raw--> true");
+    let wtime = SystemTime::now();
+    for i in 0..1000_u32 {
+        db.set_raw("horizontal_test".sub_path(i), true).unwrap()
+    }
+    let welapsed = wtime.elapsed().unwrap().as_millis();
+    println!("Reading back all values...");
+    let rtime = SystemTime::now();
+    for i in 0..1000_u32 {
+        assert_eq!(
+            black_box::<bool>(db.get_raw("horizontal_test".sub_path(i)).unwrap().unwrap()),
+            true
+        );
+    }
+    let relapsed = rtime.elapsed().unwrap().as_millis();
+    println!(
+        "Done! Write took {}ms: {}ms per write; Read took {}ms: {}ms per read.",
+        welapsed,
+        welapsed as f64 / 1000.0,
+        relapsed,
+        relapsed as f64 / 1000.0,
+    );
+    println!("\nSetting horizontal_test/{{0..2000}} --raw--> true");
+    let wtime = SystemTime::now();
+    for i in 0..2000_u32 {
+        db.set_raw("horizontal_test".sub_path(i), true).unwrap()
+    }
+    let welapsed = wtime.elapsed().unwrap().as_millis();
+    println!("Reading back all values...");
+    let rtime = SystemTime::now();
+    for i in 0..2000_u32 {
+        assert_eq!(
+            black_box::<bool>(db.get_raw("horizontal_test".sub_path(i)).unwrap().unwrap()),
+            true
+        );
+    }
+    let relapsed = rtime.elapsed().unwrap().as_millis();
+    println!(
+        "Done! Write took {}ms: {}ms per write; Read took {}ms: {}ms per read.",
+        welapsed,
+        welapsed as f64 / 2000.0,
+        relapsed,
+        relapsed as f64 / 2000.0,
+    );
     println!("\nSetting horizontal_test/{{0..10000}} --raw--> true");
     let wtime = SystemTime::now();
     for i in 0..10000_u32 {

@@ -146,6 +190,28 @@ fn main() {
         relapsed,
         relapsed as f64 / 10000.0,
     );
+    println!("\nSetting horizontal_test/{{0..20000}} --raw--> true");
+    let wtime = SystemTime::now();
+    for i in 0..20000_u32 {
+        db.set_raw("horizontal_test".sub_path(i), true).unwrap()
+    }
+    let welapsed = wtime.elapsed().unwrap().as_millis();
+    println!("Reading back all values...");
+    let rtime = SystemTime::now();
+    for i in 0..20000_u32 {
+        assert_eq!(
+            black_box::<bool>(db.get_raw("horizontal_test".sub_path(i)).unwrap().unwrap()),
+            true
+        );
+    }
+    let relapsed = rtime.elapsed().unwrap().as_millis();
+    println!(
+        "Done! Write took {}ms: {}ms per write; Read took {}ms: {}ms per read.",
+        welapsed,
+        welapsed as f64 / 20000.0,
+        relapsed,
+        relapsed as f64 / 20000.0,
+    );

     println!("\n\n-- benchmarks done --\n\n");

@@ -1,5 +1,5 @@
 use std::{
-    collections::HashMap,
+    collections::BTreeMap,
     fs::{self, File},
     hint::black_box,
     io::{self, ErrorKind, Read, Seek, SeekFrom, Write},

@@ -38,7 +38,7 @@ struct AllocationTable {
     block_size: usize,
     blocks_reserved: usize,
     free: Vec<(usize, usize)>,
-    map: HashMap<String, Allocation>,
+    map: BTreeMap<String, Allocation>,
 }

 #[derive(Debug)]

@@ -46,7 +46,7 @@ struct InnerFAlloc {
     cache_period: u128,
     data: File,
     alloc: AllocationTable,
-    cache: HashMap<String, (u128, bool, Vec<u8>)>,
+    cache: BTreeMap<String, (u128, bool, Vec<u8>)>,
     last_cache_check: u128,
     shutdown: bool,
 }

@@ -96,7 +96,7 @@ impl AllocationTable {
         for _ in 0..free_len {
             free.push((deserialize_u64!(f, buf64), deserialize_u64!(f, buf64)));
         }
-        let mut map = HashMap::with_capacity(1024);
+        let mut map = BTreeMap::new();
         for _ in 0..map_len {
             let str_len = deserialize_u64!(f, buf64);
             let mut buf = vec![0_u8; str_len];

@@ -290,7 +290,7 @@ impl FAlloc {
             cache_period,
             data,
             alloc,
-            cache: HashMap::with_capacity(1024),
+            cache: BTreeMap::new(),
             last_cache_check: 0,
             shutdown: false,
         }));

@@ -390,7 +390,7 @@ impl FAlloc {
                 block_size,
                 blocks_reserved: 0,
                 free: Vec::new(),
-                map: HashMap::with_capacity(1024),
+                map: BTreeMap::new(),
             },
             cache_period,
         )