Add a performance test case for CQL statement parsing to better understand its performance impact. We also include ANTLR tokenizer and parser setup as that's what we do in query_processor for each request. Running the test on my Haswell machine yields the following results: [penberg@nero urchin]$ build/release/tests/perf/perf_cql_parser Timing CQL statement parsing... 108090.10 tps 125366.11 tps 124400.64 tps 124274.75 tps 124850.85 tps That means that CQL parsing alone sets an upper limit of roughly 120k requests per second for Urchin on a single core. Signed-off-by: Pekka Enberg <penberg@cloudius-systems.com>
33 lines
784 B
C++
/*
 * Copyright 2015 Cloudius Systems
 */
#pragma once
|
|
|
|
#include "core/print.hh"

#include <chrono>
#include <cstdint>
#include <iostream>
|
|
|
|
template<typename Func>
|
|
void time_it(Func func, int iterations = 5) {
|
|
using clk = std::chrono::high_resolution_clock;
|
|
|
|
for (int i = 0; i < iterations; i++) {
|
|
auto start = clk::now();
|
|
auto end_at = start + std::chrono::seconds(1);
|
|
uint64_t count = 0;
|
|
|
|
while (clk::now() < end_at) {
|
|
for (int i = 0; i < 10000; i++) { // amortize clock reading cost
|
|
func();
|
|
count++;
|
|
}
|
|
}
|
|
|
|
auto end = clk::now();
|
|
auto duration = std::chrono::duration<double>(end - start).count();
|
|
std::cout << sprint("%.2f", (double)count / duration) << " tps\n";
|
|
}
|
|
}
|