dirty_memory_manager: move more allocation_queue functions out of region_group

More mechanical changes, reducing churn for later patches.
This commit is contained in:
Avi Kivity
2022-09-16 19:38:22 +03:00
parent 4bc2638cf9
commit 34d5322368
2 changed files with 56 additions and 18 deletions

View File

@@ -109,6 +109,13 @@ region_group::execution_permitted() noexcept {
}) == nullptr;
}
// Pops the oldest blocked request off the expiring FIFO and runs its
// allocation function synchronously. The caller is responsible for ensuring
// the queue is non-empty and that executing now is permitted (the releaser
// loop checks `!_blocked_requests.empty() && execution_permitted()` first).
void
allocation_queue::execute_one() {
auto req = std::move(_blocked_requests.front());
_blocked_requests.pop_front();
req->allocate();
}
future<>
region_group::start_releaser(scheduling_group deferred_work_sg) {
return with_scheduling_group(deferred_work_sg, [this] {
@@ -119,9 +126,7 @@ region_group::start_releaser(scheduling_group deferred_work_sg) {
}
if (!_blocked_requests.empty() && execution_permitted()) {
auto req = std::move(_blocked_requests.front());
_blocked_requests.pop_front();
req->allocate();
_blocked_requests.execute_one();
return make_ready_future<stop_iteration>(stop_iteration::no);
} else {
// Block reclaiming to prevent signal() from being called by reclaimer inside wait()
@@ -198,4 +203,8 @@ void allocation_queue::on_request_expiry::operator()(std::unique_ptr<allocating_
func->fail(std::make_exception_ptr(blocked_requests_timed_out_error{_name}));
}
// Constructs the queue. `on_expiry` is handed to the expiring FIFO and is
// invoked for any request still queued when its timeout is reached; it fails
// the request's future with blocked_requests_timed_out_error (see
// on_request_expiry::operator() above).
allocation_queue::allocation_queue(allocation_queue::on_request_expiry on_expiry)
: _blocked_requests(std::move(on_expiry)) {
}
}

View File

@@ -157,6 +157,35 @@ public:
explicit on_request_expiry(sstring name) : _name(std::move(name)) {}
void operator()(std::unique_ptr<allocating_function>&) noexcept;
};
private:
// It is a more common idiom to just hold the promises in the circular buffer and make them
// ready. However, in the time between the promise being made ready and the function execution,
// it could be that our memory usage went up again. To protect against that, we have to recheck
// if memory is still available after the future resolves.
//
// But we can greatly simplify it if we store the function itself in the circular_buffer, and
// execute it synchronously in release_requests() when we are sure memory is available.
//
// This allows us to easily provide strong execution guarantees while keeping all re-check
// complication in release_requests and keep the main request execution path simpler.
expiring_fifo<std::unique_ptr<allocating_function>, on_request_expiry, db::timeout_clock> _blocked_requests;
uint64_t _blocked_requests_counter = 0;
public:
explicit allocation_queue(on_request_expiry on_expiry);
void execute_one();
void push_back(std::unique_ptr<allocating_function>, db::timeout_clock::time_point timeout);
size_t blocked_requests() const noexcept;
uint64_t blocked_requests_counter() const noexcept;
size_t size() const noexcept { return _blocked_requests.size(); }
bool empty() const noexcept { return _blocked_requests.empty(); }
};
// Groups regions for the purpose of statistics. Can be nested.
@@ -180,19 +209,7 @@ class region_group : public region_listener {
using on_request_expiry = allocation_queue::on_request_expiry;
// It is a more common idiom to just hold the promises in the circular buffer and make them
// ready. However, in the time between the promise being made ready and the function execution,
// it could be that our memory usage went up again. To protect against that, we have to recheck
// if memory is still available after the future resolves.
//
// But we can greatly simplify it if we store the function itself in the circular_buffer, and
// execute it synchronously in release_requests() when we are sure memory is available.
//
// This allows us to easily provide strong execution guarantees while keeping all re-check
// complication in release_requests and keep the main request execution path simpler.
expiring_fifo<std::unique_ptr<allocating_function>, on_request_expiry, db::timeout_clock> _blocked_requests;
uint64_t _blocked_requests_counter = 0;
allocation_queue _blocked_requests;
condition_variable _relief;
future<> _releaser;
@@ -563,11 +580,17 @@ region_group::run_when_memory_available(Func&& func, db::timeout_clock::time_poi
auto fn = std::make_unique<concrete_allocating_function<Func>>(std::forward<Func>(func));
auto fut = fn->get_future();
_blocked_requests.push_back(std::move(fn), timeout);
++_blocked_requests_counter;
return fut;
}
// Enqueues an allocating function to be executed (via execute_one()) once
// memory becomes available. The entry expires at `timeout`: if it is still
// queued at that point, the expiring FIFO invokes on_request_expiry, which
// fails the request's future with blocked_requests_timed_out_error.
inline
void
allocation_queue::push_back(std::unique_ptr<allocation_queue::allocating_function> f, db::timeout_clock::time_point timeout) {
    // BUG FIX: `timeout` was accepted but never forwarded — the no-timeout
    // overload of expiring_fifo::push_back never expires, so timeouts passed
    // from region_group::run_when_memory_available() were silently ignored.
    _blocked_requests.push_back(std::move(f), timeout);
    ++_blocked_requests_counter;
}
inline
size_t
region_group::blocked_requests() const noexcept {
@@ -576,10 +599,16 @@ region_group::blocked_requests() const noexcept {
inline
uint64_t
region_group::blocked_requests_counter() const noexcept {
allocation_queue::blocked_requests_counter() const noexcept {
return _blocked_requests_counter;
}
inline
uint64_t
region_group::blocked_requests_counter() const noexcept {
return _blocked_requests.blocked_requests_counter();
}
inline
bool
region_group::under_pressure() const noexcept {