From 503f1bf4d049cfb5b5a8b9cae8a1200f0d5b5cc7 Mon Sep 17 00:00:00 2001
From: Avi Kivity <avi@cloudius-systems.com>
Date: Thu, 11 Dec 2014 19:19:09 +0200
Subject: [PATCH] virtio: batch transmitted packets

Instead of placing packets directly into the virtio ring, add them to a
temporary queue, and flush it when we are polled.  This reduces cross-cpu
writes and kicks.
---
 net/virtio.cc | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/net/virtio.cc b/net/virtio.cc
index e8d7049348..a57a667e41 100644
--- a/net/virtio.cc
+++ b/net/virtio.cc
@@ -270,6 +270,7 @@ private:
     semaphore _available_descriptors = { 0 };
     int _free_head = -1;
     int _free_last = -1;
+    std::vector<uint16_t> _batch;
     std::experimental::optional<reactor::poller> _poller;
     bool _poll_mode = false;
 public:
@@ -300,6 +301,8 @@ public:
     template <typename Iterator>
     void post(Iterator begin, Iterator end);
 
+    void flush_batch();
+
     semaphore& available_descriptors() { return _available_descriptors; }
 private:
     // Let host know about interrupt delivery
@@ -412,12 +415,25 @@ void vring::run() {
         complete();
     } else {
         _poller = reactor::poller([this] {
+            flush_batch();
             do_complete();
             return true;
         });
     }
 }
 
+void vring::flush_batch() {
+    if (_batch.empty()) {
+        return;
+    }
+    for (auto desc_head : _batch) {
+        _avail._shared->_ring[masked(_avail._head++)] = desc_head;
+    }
+    _batch.clear();
+    _avail._shared->_idx.store(_avail._head, std::memory_order_release);
+    kick();
+}
+
 template <typename Iterator>
 void vring::post(Iterator begin, Iterator end) {
     // Note: buffer_chain here is any container of buffer, not
@@ -443,13 +459,19 @@ void vring::post(Iterator begin, Iterator end) {
     }
     auto desc_head = pseudo_head._next;
     _completions[desc_head] = std::move(bc.completed);
-    _avail._shared->_ring[masked(_avail._head++)] = desc_head;
+    if (!_poll_mode) {
+        _avail._shared->_ring[masked(_avail._head++)] = desc_head;
+    } else {
+        _batch.push_back(desc_head);
+    }
     _avail._avail_added_since_kick++;
     });
-    _avail._shared->_idx.store(_avail._head, std::memory_order_release);
-    kick();
     if (!_poll_mode) {
+        _avail._shared->_idx.store(_avail._head, std::memory_order_release);
+        kick();
         do_complete();
+    } else if (_batch.size() >= 16) {
+        flush_batch();
     }
 }