core: avoid recursion in keep_doing()

Recursion consumes stack space, which in turn occupies cache lines,
leaving less room in the caches for useful data.

In addition to that, a limit on iteration count can be larger than the
limit on recursion, because we're not limited by stack size here.

Also, recursion makes flame-graphs really hard to analyze because
keep_doing() frames appear at different levels of nesting in the
profile leading to many short "towers" instead of one big tower.

This change reuses the same counter for limiting iterations as is used
to limit the number of tasks executed by the reactor before polling.

A run-time parameter was added for controlling the task quota.
This commit is contained in:
Tomasz Grabiec
2014-11-18 17:35:02 +01:00
committed by Avi Kivity
parent 8cb9185cb6
commit f458117b83
3 changed files with 29 additions and 8 deletions

View File

@@ -70,13 +70,28 @@ future<> do_until(StopCondition&& stop_cond, AsyncAction&& action) {
template<typename AsyncAction>
static inline
future<> keep_doing(AsyncAction&& action) {
    // Invoke `action` repeatedly, forever, without growing the call stack.
    //
    // Fast path: as long as `action` completes immediately (its future is
    // already available) and we still have task quota left, loop in place
    // instead of recursing.  `task_quota` is shared with the reactor's
    // task-execution loop, so this also bounds how long we run before the
    // reactor gets a chance to poll.
    //
    // NOTE(review): unlike the recursive version, an exception thrown
    // synchronously by `action()` propagates to the caller here instead of
    // being converted to a failed future — confirm callers can handle that.
    while (task_quota) {
        auto f = action();
        if (!f.available()) {
            // Slow path: the action went asynchronous.  Chain the next
            // iteration as a continuation; the recursion here is bounded
            // because each level waits for a not-yet-available future.
            return f.then([action = std::forward<AsyncAction>(action)] () mutable {
                return keep_doing(std::forward<AsyncAction>(action));
            });
        }
        if (f.failed()) {
            // Stop iterating and propagate the failure.  Checked only
            // after available() — failed() is meaningful only then.
            return std::move(f);
        }
        --task_quota;
    }
    // Quota exhausted: yield to the reactor by rescheduling the remainder
    // as a task, so pending work (and polling) gets a chance to run.
    promise<> p;
    auto f = p.get_future();
    schedule(make_task([action = std::forward<AsyncAction>(action), p = std::move(p)] () mutable {
        keep_doing(std::forward<AsyncAction>(action)).forward_to(std::move(p));
    }));
    return f;
}
template<typename Iterator, typename AsyncAction>

View File

@@ -74,6 +74,7 @@ void reactor::configure(boost::program_options::variables_map vm) {
? network_stack_registry::create(sstring(vm["network-stack"].as<std::string>()), vm)
: network_stack_registry::create(vm);
_handle_sigint = !vm.count("no-handle-interrupt");
_task_quota = vm["task-quota"].as<int>();
}
future<> reactor_backend_epoll::get_epoll_future(pollable_fd_state& pfd,
@@ -420,8 +421,9 @@ int reactor::run() {
});
complete_timers();
while (true) {
unsigned loop = 0;
while (!_pending_tasks.empty() && loop++ < 200) {
task_quota = _task_quota;
while (!_pending_tasks.empty() && task_quota) {
--task_quota;
auto tsk = std::move(_pending_tasks.front());
_pending_tasks.pop_front();
tsk->run();
@@ -731,6 +733,7 @@ reactor::get_options_description() {
sprint("select network stack (valid values: %s)",
format_separated(net_stack_names.begin(), net_stack_names.end(), ", ")).c_str())
("no-handle-interrupt", "ignore SIGINT (for gdb)")
("task-quota", bpo::value<int>()->default_value(200), "Max number of tasks executed between polls and in loops")
;
opts.add(network_stack_registry::options_description());
return opts;
@@ -823,6 +826,7 @@ void smp::join_all()
}
__thread size_t future_avail_count = 0;
__thread size_t task_quota = 0;
thread_local reactor engine;

View File

@@ -551,6 +551,7 @@ private:
semaphore _io_context_available;
circular_buffer<std::unique_ptr<task>> _pending_tasks;
thread_pool _thread_pool;
size_t _task_quota;
private:
void abort_on_error(int ret);
void complete_timers();
@@ -642,6 +643,7 @@ public:
};
extern thread_local reactor engine;
extern __thread size_t task_quota;
class smp {
static std::vector<posix_thread> _threads;