Add id parsing benchmarks
Initial results:

    running 4 tests
    test hash_wikidata   ... bench:          14 ns/iter (+/- 0)
    test hash_wikipedia  ... bench:          34 ns/iter (+/- 1)
    test parse_wikidata  ... bench:          18 ns/iter (+/- 0)
    test parse_wikipedia ... bench:      60,682 ns/iter (+/- 83,376)

Based on these results and a flamegraph of loading the file, url parsing is the most expensive part.

Signed-off-by: Evan Lloyd New-Schmidt <evan@new-schmidt.com>
parent 34ce30301c
commit 00a199e20c

1 changed file with 43 additions and 0 deletions
benches/id_parsing.rs (new file, +43)
@@ -0,0 +1,43 @@
// Benchmarks for Wikidata/Wikipedia id parsing and hashing.
// Uses the unstable `test` crate, so a nightly toolchain is required.
#![feature(test)]
use std::{collections::HashSet, str::FromStr};

extern crate om_wikiparser;
extern crate test;

// Parse a Wikipedia article URL into a normalized title.
#[bench]
fn parse_wikipedia(b: &mut test::Bencher) {
    b.iter(|| {
        let title = om_wikiparser::wm::WikipediaTitleNorm::from_url(
            "https://en.wikipedia.org/wiki/Article_Title",
        )
        .unwrap();
    });
}

// Hash an already-parsed title by inserting it into a HashSet.
#[bench]
fn hash_wikipedia(b: &mut test::Bencher) {
    let title = om_wikiparser::wm::WikipediaTitleNorm::from_url(
        "https://en.wikipedia.org/wiki/Article_Title",
    )
    .unwrap();
    let mut set = HashSet::new();
    b.iter(|| {
        set.insert(&title);
    });
}

// Parse a Wikidata QID from a string.
#[bench]
fn parse_wikidata(b: &mut test::Bencher) {
    b.iter(|| {
        let qid = om_wikiparser::wm::WikidataQid::from_str("Q123456789").unwrap();
    });
}

// Hash an already-parsed QID by inserting it into a HashSet.
#[bench]
fn hash_wikidata(b: &mut test::Bencher) {
    let qid = om_wikiparser::wm::WikidataQid::from_str("Q123456789").unwrap();
    let mut set = HashSet::new();
    b.iter(|| {
        set.insert(&qid);
    });
}
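Note: as written, the parsing benches bind their result to an unused local inside the closure. A variant (not part of this commit) could route the parsed value through test::black_box, or return it from the closure, to make sure the optimizer cannot discard the measured work. A minimal sketch, assuming the same om_wikiparser::wm API used above; the bench name is hypothetical:

#![feature(test)]
extern crate om_wikiparser;
extern crate test;

use std::str::FromStr;

// Hypothetical variant of parse_wikidata: feed the result through
// test::black_box so the parse cannot be optimized away even though
// the value is never used afterwards.
#[bench]
fn parse_wikidata_black_box(b: &mut test::Bencher) {
    b.iter(|| {
        test::black_box(om_wikiparser::wm::WikidataQid::from_str("Q123456789").unwrap())
    });
}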