mirror of
https://github.com/scylladb/scylladb.git
synced 2026-04-22 01:20:39 +00:00
sstable: Fix write buffer size
The current 4K write buffer is ridiculously small and forces Urchin to issue small I/O batches. Increase the buffer size to 64K.

Before:

Results:
op rate                   : 27265
partition rate            : 27265
row rate                  : 27265
latency mean              : 1.2
latency median            : 0.9
latency 95th percentile   : 2.4
latency 99th percentile   : 10.6
latency 99.9th percentile : 14.3
latency max               : 44.7
Total operation time      : 00:00:30
END

After:

Results:
op rate                   : 35365
partition rate            : 35365
row rate                  : 35365
latency mean              : 0.9
latency median            : 0.8
latency 95th percentile   : 1.8
latency 99th percentile   : 8.8
latency 99.9th percentile : 21.8
latency max               : 272.2
Total operation time      : 00:00:34
END

Signed-off-by: Pekka Enberg <penberg@cloudius-systems.com>
This commit is contained in:
@@ -1177,9 +1177,11 @@ future<> sstable::write_components(const memtable& mt) {
         _components.insert(component_type::CRC);
     }

+    constexpr size_t sstable_buffer_size = 64*1024;
+
     // TODO: Add compression support by having a specialized output stream.
-    auto w = make_shared<checksummed_file_writer>(_data_file, 4096, checksum_file);
-    auto index = make_shared<file_writer>(_index_file, 4096);
+    auto w = make_shared<checksummed_file_writer>(_data_file, sstable_buffer_size, checksum_file);
+    auto index = make_shared<file_writer>(_index_file, sstable_buffer_size);

     prepare_summary(_summary, mt);
     auto filter_fp_chance = mt.schema()->bloom_filter_fp_chance();
Reference in New Issue
Block a user