database: prevent streaming reads from blocking normal reads

Streaming reads and normal reads share a semaphore, so if a bunch of
streaming reads use all available slots, no normal reads can proceed.

Fix by assigning streaming reads their own semaphore; they will compete
with normal reads once issued, and the I/O scheduler will determine the
winner.

Fixes #2663.
Message-Id: <20170802153107.939-1-avi@scylladb.com>
This commit is contained in:
Avi Kivity
2017-08-02 18:31:07 +03:00
committed by Duarte Nunes
parent 911536960a
commit f38e4ff3f9
2 changed files with 3 additions and 2 deletions

View File

@@ -3370,8 +3370,7 @@ database::make_keyspace_config(const keyspace_metadata& ksm) {
};
// No timeouts or queue length limits - a failure here can kill an entire repair.
// Trust the caller to limit concurrency.
-// FIXME: consider a separate semaphore
-cfg.streaming_read_concurrency_config.sem = &_read_concurrency_sem;
+cfg.streaming_read_concurrency_config.sem = &_streaming_concurrency_sem;
cfg.cf_stats = &_cf_stats;
cfg.enable_incremental_backups = _enable_incremental_backups;

View File

@@ -997,6 +997,7 @@ public:
private:
::cf_stats _cf_stats;
static constexpr size_t max_concurrent_reads() { return 100; }
+static constexpr size_t max_streaming_concurrent_reads() { return 10; } // They're rather heavyweight, so limit more
static constexpr size_t max_system_concurrent_reads() { return 10; }
static constexpr size_t max_concurrent_sstable_loads() { return 3; }
struct db_stats {
@@ -1024,6 +1025,7 @@ private:
flush_cpu_controller _memtable_cpu_controller;
semaphore _read_concurrency_sem{max_concurrent_reads()};
+semaphore _streaming_concurrency_sem{max_streaming_concurrent_reads()};
restricted_mutation_reader_config _read_concurrency_config;
semaphore _system_read_concurrency_sem{max_system_concurrent_reads()};
restricted_mutation_reader_config _system_read_concurrency_config;