diff --git a/.gitmodules b/.gitmodules
index 49853b8d2f..c5857050fa 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,6 @@
-[submodule "dpdk"]
- path = dpdk
- url = ../dpdk
+[submodule "seastar"]
+ path = seastar
+ url = ../seastar
[submodule "swagger-ui"]
path = swagger-ui
url = ../urchin-swagger-ui
diff --git a/LICENSE.seastar b/LICENSE.seastar
deleted file mode 100644
index f433b1a53f..0000000000
--- a/LICENSE.seastar
+++ /dev/null
@@ -1,177 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/NOTICE b/NOTICE
deleted file mode 100644
index ef3d318a60..0000000000
--- a/NOTICE
+++ /dev/null
@@ -1,11 +0,0 @@
-Seastar Framework
-Copyright 2015 Cloudius Systems
-
-This works contains software from the OSv project (http://osv.io), licensed
-under the BSD license.
-
-This work contains software from the DPDK project (http://dpdk.org), licensed
-under the BSD license. The software is under the dpdk/ directory.
-
-This works contains software from https://github.com/hideo55/cpp-HyperLogLog, licensed
-under the MIT license.
diff --git a/README-OSv.md b/README-OSv.md
deleted file mode 100644
index 55fe7fdca5..0000000000
--- a/README-OSv.md
+++ /dev/null
@@ -1,108 +0,0 @@
-Running Seastar on OSv
-======================
-
-1. Compiling Seastar for OSv
-----------------------------
-
-Before compiling Seastar, configure it with the following command:
-
-./configure.py --so --disable-hwloc \
- --cflags="-DDEFAULT_ALLOCATOR -fvisibility=default -DHAVE_OSV -I../osv/include" \
- --mode release
-
-Or more easily, use the "--with-osv=..." shortcut for all the above settings:
-
-./configure.py --mode release --with-osv=../osv
-
-
-Explanation of these configuration options:
- * The "--so" option is needed so that the Seastar applications, such as
- httpd, are built as shared objects instead of ordinary executables.
- Note the "--pie" option also exists, but because of bug #352 in OSv,
- and the fact that Seastar uses thread_local in one place, these PIEs
- cannot be run on OSv.
- * The "--disable-hwloc" option is needed so that Seastar does not attempt
- to use the complex NUMA-discovery library, which isn't supported on OSv
- (and isn't relevant anyway, because VMs with NUMA are not (yet) common.
- * The "-DEFAULT_ALLOCATOR" uses in Seastar the system's regular malloc()
- and free(), instead of redefining them. Without this flag, what seems
- to happen is that some code compiled into the OSv kernel (notably,
- libboost_program_options.a) uses the standard malloc(), while inline
- code compiled into Seastar uses the Seastar free() to try and free that
- memory, resulting in a spectacular crash.
- * The "-fvisibility=default" option disables the "-fvisibility=hidden"
- option which is hard-coded into Seastar's build file. Supposedly
- "-fvisibility=hidden" provides some better optimization in some cases,
- but it also means OSv can't find main() in the generated shared object!
- So either we use "-fvisibility=default", as suggested here, or
- alternatively, make *only* main() visible - for example, to make httpd's
- main() visible add in apps/httpd/httpd.cc, before the main() definition,
- the chant: [[gnu::visibility("default")]]
- * The "-DHAVE_OSV" conditionally compiles code in Seastar that relies
- on OSv APIs (currently, this enables virtio device assignment).
- This OSv-specific code relies on some OSv header files, which is
- why the "-I../osv/include" is needed (where "../osv" is where the
- OSv source tree is open).
- * The "--mode release" is to compile only the release build. You'll
- usually not want to debug Seastar on OSv (it's easier to debug on Linux).
-
-
-2. Building a Seastar-httpd module for OSv
-------------------------------------------
-
-As an example, we'll build a "seastar" module in OSv running Seastar's
-httpd application.
-
-In the OSv working directory, create a directory apps/seastar in it put:
-
-* a link to the httpd binary in the Seastar working directory. I.e.,
- ln -s ../../../seastar/build/release/apps/httpd/httpd httpd
-
-* A usr.manifest file, adding only this single "httpd" executable to the image:
- /httpd: ${MODULE_DIR}/httpd
-
-* A module.py file with a default command line:
-
- from osv.modules import api
- default = api.run(cmdline="/httpd --no-handle-interrupt")
-
-The "--no-handle-interrupt" is needed so that Seastar does not attempt to
-use signalfd() to capture ^C. signalfd is not yet available on OSv, and
-the ^C handling is not a very important feature of Seastar anyway.
-
-Also note that currently we cannot specify "--network-stack=native", because
-neither vhost nor a more efficient mechanism for "virtio assignment" is yet
-complete on OSv. So we must keep the default (which is "--network-stack=posix")
-which uses the OS's Posix networking APIs, which OSv fully supports.
-
-
-3. Running the seastar module on OSv
--------------------------------------
-
-To run the Seastar httpd application, using the module defined above, do,
-as usual, in the OSv working directory:
-
-$ make image=seastar -j4
-$ sudo scripts/run.py -nvV
-
-This will open an HTTP server on port 10000 of the VM. For example, if the
-above creates a VM with an IP address of 192.168.122.89, we can test it as
-following:
-
-$ curl 192.168.122.89:10000
-this is the future!!
-
-4. Debugging OSv with the Seastar application
----------------------------------------------
-
-If you want to debug OSv (not the Seastar application) in relation to the
-way it runs Seastar, you'll want the "httpd" shared object to be available
-to gdb.
-Unfortunately, the object lookup code in "osv syms" (translate() in loader.py)
-does not seem to look for objects in apps/, so until we fix this, we need
-to put a link to httpd in a well-known place, such as build/release. So
-do this in the OSv top directory:
- ln -s ../../apps/seastar/httpd build/release/httpd
-
-Note you'll need to repeat this if you do "make clean" (as "make clean"
-removes everything in build/release).
diff --git a/README.md b/README.md
deleted file mode 100644
index 861f2655e0..0000000000
--- a/README.md
+++ /dev/null
@@ -1,348 +0,0 @@
-Seastar
-=======
-
-Introduction
-------------
-
-SeaStar is an event-driven framework allowing you to write non-blocking,
-asynchronous code in a relatively straightforward manner (once understood).
-It is based on [futures](http://en.wikipedia.org/wiki/Futures_and_promises).
-
-Building Seastar
---------------------
-
-
-### Building seastar on Fedora 21
-
-Installing required packages:
-```
-yum install gcc-c++ libaio-devel ninja-build ragel hwloc-devel numactl-devel libpciaccess-devel cryptopp-devel xen-devel boost-devel
-```
-
-You then need to run the following to create the "build.ninja" file:
-```
-./configure.py
-```
-Note it is enough to run this once, and you don't need to repeat it before
-every build. build.ninja includes a rule which will automatically re-run
-./configure.py if it changes.
-
-Then finally:
-```
-ninja-build
-```
-
-### Building seastar on Fedora 20
-
-Installing GCC 4.9 for gnu++1y:
-* Beware that this installation will replace your current GCC version.
-```
-yum install fedora-release-rawhide
-yum --enablerepo rawhide update gcc-c++
-yum --enablerepo rawhide install libubsan libasan
-```
-
-Installing required packages:
-```
-yum install libaio-devel ninja-build ragel hwloc-devel numactl-devel libpciaccess-devel cryptopp-devel
-```
-
-You then need to run the following to create the "build.ninja" file:
-```
-./configure.py
-```
-Note it is enough to run this once, and you don't need to repeat it before
-every build. build.ninja includes a rule which will automatically re-run
-./configure.py if it changes.
-
-Then finally:
-```
-ninja-build
-```
-
-### Building seastar on Ubuntu 14.04
-
-Installing required packages:
-```
-sudo apt-get install libaio-dev ninja-build ragel libhwloc-dev libnuma-dev libpciaccess-dev libcrypto++-dev libboost-all-dev
-```
-
-Installing GCC 4.9 for gnu++1y. Unlike the Fedora case above, this will
-not harm the existing installation of GCC 4.8, and will install an
-additional set of compilers, and additional commands named gcc-4.9,
-g++-4.9, etc., that need to be used explicitly, while the "gcc", "g++",
-etc., commands continue to point to the 4.8 versions.
-
-```
-# Install add-apt-repository
-sudo apt-get install software-properties-common python-software-properties
-# Use it to add Ubuntu's testing compiler repository
-sudo add-apt-repository ppa:ubuntu-toolchain-r/test
-sudo apt-get update
-# Install gcc 4.9 and relatives
-sudo apt-get install g++-4.9
-# Also set up necessary header file links and stuff (?)
-sudo apt-get install gcc-4.9-multilib g++-4.9-multilib
-```
-
-To compile Seastar explicitly using gcc 4.9, use:
-```
-./configure.py --compiler=g++-4.9
-```
-
-To compile OSv explicitly using gcc 4.9, use:
-```
-make CC=gcc-4.9 CXX=g++-4.9 -j 24
-```
-
-### Building seastar in Docker container
-
-To build a Docker image:
-
-```
-docker build -t seastar-dev docker/dev
-```
-
-Create an shell function for building insider the container (bash syntax given):
-
-```
-$ seabuild() { docker run -v $HOME/seastar/:/seastar -u $(id -u):$(id -g) -w /seastar -t seastar-dev "$@"; }
-```
-
-(it is recommended to put this inside your .bashrc or similar)
-
-To build inside a container:
-
-```
-$ seabuild ./configure.py
-$ seabuild ninja-build
-```
-
-### Building with a DPDK network backend
-
- 1. Setup host to compile DPDK:
- - Ubuntu
- `sudo apt-get install -y build-essential linux-image-extra-$(uname -r$)`
- 2. Run a configure.py: `./configure.py --enable-dpdk`.
- 3. Run `ninja-build`.
-
-To run with the DPDK backend for a native stack give the seastar application `--dpdk-pmd 1` parameter.
-
-You can also configure DPDK as an [external package](README-DPDK.md).
-
-Futures and promises
---------------------
-
-A *future* is a result of a computation that may not be available yet.
-Examples include:
-
- * a data buffer that we are reading from the network
- * the expiration of a timer
- * the completion of a disk write
- * the result computation that requires the values from
- one or more other futures.
-
-a *promise* is an object or function that provides you with a future,
-with the expectation that it will fulfill the future.
-
-Promises and futures simplify asynchronous programming since they decouple
-the event producer (the promise) and the event consumer (whoever uses the
-future). Whether the promise is fulfilled before the future is consumed,
-or vice versa, does not change the outcome of the code.
-
-Consuming a future
-------------------
-
-You consume a future by using its *then()* method, providing it with a
-callback (typically a lambda). For example, consider the following
-operation:
-
-```C++
-future<int> get(); // promises an int will be produced eventually
-future<> put(int) // promises to store an int
-
-void f() {
- get().then([] (int value) {
- put(value + 1).then([] {
- std::cout << "value stored successfully\n";
- });
- });
-}
-```
-
-Here, we initiate a *get()* operation, requesting that when it completes, a
-*put()* operation will be scheduled with an incremented value. We also
-request that when the *put()* completes, some text will be printed out.
-
-Chaining futures
-----------------
-
-If a *then()* lambda returns a future (call it x), then that *then()*
-will return a future (call it y) that will receive the same value. This
-removes the need for nesting lambda blocks; for example the code above
-could be rewritten as:
-
-```C++
-future<int> get(); // promises an int will be produced eventually
-future<> put(int) // promises to store an int
-
-void f() {
- get().then([] (int value) {
- return put(value + 1);
- }).then([] {
- std::cout << "value stored successfully\n";
- });
-}
-```
-
-Loops
------
-
-Loops are achieved with a tail call; for example:
-
-```C++
-future<int> get(); // promises an int will be produced eventually
-future<> put(int) // promises to store an int
-
-future<> loop_to(int end) {
- if (value == end) {
- return make_ready_future<>();
- }
- get().then([end] (int value) {
- return put(value + 1);
- }).then([end] {
- return loop_to(end);
- });
-}
-```
-
-The *make_ready_future()* function returns a future that is already
-available --- corresponding to the loop termination condition, where
-no further I/O needs to take place.
-
-Under the hood
---------------
-
-When the loop above runs, both *then* method calls execute immediately
---- but without executing the bodies. What happens is the following:
-
-1. `get()` is called, initiates the I/O operation, and allocates a
- temporary structure (call it `f1`).
-2. The first `then()` call chains its body to `f1` and allocates
- another temporary structure, `f2`.
-3. The second `then()` call chains its body to `f2`.
-
-Again, all this runs immediately without waiting for anything.
-
-After the I/O operation initiated by `get()` completes, it calls the
-continuation stored in `f1`, calls it, and frees `f1`. The continuation
-calls `put()`, which initiates the I/O operation required to perform
-the store, and allocates a temporary object `f12`, and chains some glue
-code to it.
-
-After the I/O operation initiated by `put()` completes, it calls the
-continuation associated with `f12`, which simply tells it to call the
-continuation associated with `f2`. This continuation simply calls
-`loop_to()`. Both `f12` and `f2` are freed. `loop_to()` then calls
-`get()`, which starts the process all over again, allocating new versions
-of `f1` and `f2`.
-
-Handling exceptions
--------------------
-
-If a `.then()` clause throws an exception, the scheduler will catch it
-and cancel any dependent `.then()` clauses. If you want to trap the
-exception, add a `.then_wrapped()` clause at the end:
-
-```C++
-future<buffer> receive();
-request parse(buffer buf);
-future<response> process(request req);
-future<> send(response resp);
-
-void f() {
- receive().then([] (buffer buf) {
- return process(parse(std::move(buf));
- }).then([] (response resp) {
- return send(std::move(resp));
- }).then([] {
- f();
- }).then_wrapped([] (auto&& f) {
- try {
- f.get();
- } catch (std::exception& e) {
- // your handler goes here
- }
- });
-}
-```
-
-The previous future is passed as a parameter to the lambda, and its value can
-be inspected with `f.get()`. When the `get()` variable is called as a
-function, it will re-throw the exception that aborted processing, and you can
-then apply any needed error handling. It is essentially a transformation of
-
-```C++
-buffer receive();
-request parse(buffer buf);
-response process(request req);
-void send(response resp);
-
-void f() {
- try {
- while (true) {
- auto req = parse(receive());
- auto resp = process(std::move(req));
- send(std::move(resp));
- }
- } catch (std::exception& e) {
- // your handler goes here
- }
-}
-```
-
-Note, however, that the `.then_wrapped()` clause will be scheduled both when
-exception occurs or not. Therefore, the mere fact that `.then_wrapped()` is
-executed does not mean that an exception was thrown. Only the execution of the
-catch block can guarantee that.
-
-
-This is shown below:
-
-```C++
-
-future<my_type> my_future();
-
-void f() {
- receive().then_wrapped([] (future<my_type> f) {
- try {
- my_type x = f.get();
- return do_something(x);
- } catch (std::exception& e) {
- // your handler goes here
- }
- });
-}
-```
-### Setup notes
-
-SeaStar is a high performance framework and tuned to get the best
-performance by default. As such, we're tuned towards polling vs interrupt
-driven. Our assumption is that applications written for SeaStar will be
-busy handling 100,000 IOPS and beyond. Polling means that each of our
-cores will consume 100% cpu even when no work is given to it.
-
-
-Recommended hardware configuration for SeaStar
-----------------------------------------------
-
-* CPUs - As much as you need. SeaStar is highly friendly for multi-core and NUMA
-* NICs - As fast as possible, we recommend 10G or 40G cards. It's possible to use
- 1G to but you may be limited by their capacity.
- In addition, the more hardware queue per cpu the better for SeaStar.
- Otherwise we have to emulate that in software.
-* Disks - Fast SSDs with high number of IOPS.
-* Client machines - Usually a single client machine can't load our servers.
- Both memaslap (memcached) and WRK (httpd) cannot over load their matching
- server counter parts. We recommend running the client on different machine
- than the servers and use several of them.
diff --git a/apps/httpd/demo.json b/apps/httpd/demo.json
deleted file mode 100644
index 12261c453f..0000000000
--- a/apps/httpd/demo.json
+++ /dev/null
@@ -1,73 +0,0 @@
-{
- "apiVersion": "0.0.1",
- "swaggerVersion": "1.2",
- "basePath": "{{Protocol}}://{{Host}}",
- "resourcePath": "/hello",
- "produces": [
- "application/json"
- ],
- "apis": [
- {
- "path": "/hello/world/{var1}/{var2}",
- "operations": [
- {
- "method": "GET",
- "summary": "Returns the number of seconds since the system was booted",
- "type": "long",
- "nickname": "hello_world",
- "produces": [
- "application/json"
- ],
- "parameters": [
- {
- "name":"var2",
- "description":"Full path of file or directory",
- "required":true,
- "allowMultiple":true,
- "type":"string",
- "paramType":"path"
- },
- {
- "name":"var1",
- "description":"Full path of file or directory",
- "required":true,
- "allowMultiple":false,
- "type":"string",
- "paramType":"path"
- },
- {
- "name":"query_enum",
- "description":"The operation to perform",
- "required":true,
- "allowMultiple":false,
- "type":"string",
- "paramType":"query",
- "enum":["VAL1", "VAL2", "VAL3"]
- }
- ]
- }
- ]
- }
- ],
- "models" : {
- "my_object": {
- "id": "my_object",
- "description": "Demonstrate an object",
- "properties": {
- "var1": {
- "type": "string",
- "description": "The first parameter in the path"
- },
- "var2": {
- "type": "string",
- "description": "The second parameter in the path"
- },
- "enum_var" : {
- "type": "string",
- "description": "Demonstrate an enum returned, note this is not the same enum type of the request",
- "enum":["VAL1", "VAL2", "VAL3"]
- }
- }
- }
- }
-}
diff --git a/apps/httpd/main.cc b/apps/httpd/main.cc
deleted file mode 100644
index 27bebbcb40..0000000000
--- a/apps/httpd/main.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright 2015 Cloudius Systems
- */
-
-#include "http/httpd.hh"
-#include "http/handlers.hh"
-#include "http/function_handlers.hh"
-#include "http/file_handler.hh"
-#include "apps/httpd/demo.json.hh"
-#include "http/api_docs.hh"
-
-namespace bpo = boost::program_options;
-
-using namespace httpd;
-
-class handl : public httpd::handler_base {
-public:
- virtual future<std::unique_ptr<reply> > handle(const sstring& path,
- std::unique_ptr<request> req, std::unique_ptr<reply> rep) {
- rep->_content = "hello";
- rep->done("html");
- return make_ready_future<std::unique_ptr<reply>>(std::move(rep));
- }
-};
-
-void set_routes(routes& r) {
- function_handler* h1 = new function_handler([](const_req req) {
- return "hello";
- });
- function_handler* h2 = new function_handler([](std::unique_ptr<request> req) {
- return make_ready_future<json::json_return_type>("json-future");
- });
- r.add(operation_type::GET, url("/"), h1);
- r.add(operation_type::GET, url("/jf"), h2);
- r.add(operation_type::GET, url("/file").remainder("path"),
- new directory_handler("/"));
- demo_json::hello_world.set(r, [] (const_req req) {
- demo_json::my_object obj;
- obj.var1 = req.param.at("var1");
- obj.var2 = req.param.at("var2");
- demo_json::ns_hello_world::query_enum v = demo_json::ns_hello_world::str2query_enum(req.query_parameters.at("query_enum"));
- // This demonstrate enum conversion
- obj.enum_var = v;
- return obj;
- });
-}
-
-int main(int ac, char** av) {
- app_template app;
- app.add_options()("port", bpo::value<uint16_t>()->default_value(10000),
- "HTTP Server port");
- return app.run(ac, av, [&] {
- auto&& config = app.configuration();
- uint16_t port = config["port"].as<uint16_t>();
- auto server = new http_server_control();
- auto rb = make_shared<api_registry_builder>("apps/httpd/");
- server->start().then([server] {
- return server->set_routes(set_routes);
- }).then([server, rb]{
- return server->set_routes([rb](routes& r){rb->set_api_doc(r);});
- }).then([server, rb]{
- return server->set_routes([rb](routes& r) {rb->register_function(r, "demo", "hello world application");});
- }).then([server, port] {
- return server->listen(port);
- }).then([server, port] {
- std::cout << "Seastar HTTP server listening on port " << port << " ...\n";
- engine().at_exit([server] {
- return server->stop();
- });
- });
-
- });
-}
diff --git a/apps/memcached/ascii.rl b/apps/memcached/ascii.rl
deleted file mode 100644
index f6f577c412..0000000000
--- a/apps/memcached/ascii.rl
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-
-#include "core/ragel.hh"
-#include "apps/memcached/memcached.hh"
-#include
-#include
-#include
-
-%%{
-
-machine memcache_ascii_protocol;
-
-access _fsm_;
-
-action mark {
- g.mark_start(p);
-}
-
-action start_blob {
- g.mark_start(p);
- _size_left = _size;
-}
-
-action advance_blob {
- auto len = std::min((uint32_t)(pe - p), _size_left);
- _size_left -= len;
- p += len;
- if (_size_left == 0) {
- _blob = str();
- p--;
- fret;
- }
- p--;
-}
-
-crlf = '\r\n';
-sp = ' ';
-u32 = digit+ >{ _u32 = 0; } ${ _u32 *= 10; _u32 += fc - '0'; };
-u64 = digit+ >{ _u64 = 0; } ${ _u64 *= 10; _u64 += fc - '0'; };
-key = [^ ]+ >mark %{ _key = memcache::item_key(str()); };
-flags = digit+ >mark %{ _flags_str = str(); };
-expiration = u32 %{ _expiration = _u32; };
-size = u32 >mark %{ _size = _u32; _size_str = str(); };
-blob := any+ >start_blob $advance_blob;
-maybe_noreply = (sp "noreply" @{ _noreply = true; })? >{ _noreply = false; };
-maybe_expiration = (sp expiration)? >{ _expiration = 0; };
-version_field = u64 %{ _version = _u64; };
-
-insertion_params = sp key sp flags sp expiration sp size maybe_noreply (crlf @{ fcall blob; } ) crlf;
-set = "set" insertion_params @{ _state = state::cmd_set; };
-add = "add" insertion_params @{ _state = state::cmd_add; };
-replace = "replace" insertion_params @{ _state = state::cmd_replace; };
-cas = "cas" sp key sp flags sp expiration sp size sp version_field maybe_noreply (crlf @{ fcall blob; } ) crlf @{ _state = state::cmd_cas; };
-get = "get" (sp key %{ _keys.emplace_back(std::move(_key)); })+ crlf @{ _state = state::cmd_get; };
-gets = "gets" (sp key %{ _keys.emplace_back(std::move(_key)); })+ crlf @{ _state = state::cmd_gets; };
-delete = "delete" sp key maybe_noreply crlf @{ _state = state::cmd_delete; };
-flush = "flush_all" maybe_expiration maybe_noreply crlf @{ _state = state::cmd_flush_all; };
-version = "version" crlf @{ _state = state::cmd_version; };
-stats = "stats" crlf @{ _state = state::cmd_stats; };
-stats_hash = "stats hash" crlf @{ _state = state::cmd_stats_hash; };
-incr = "incr" sp key sp u64 maybe_noreply crlf @{ _state = state::cmd_incr; };
-decr = "decr" sp key sp u64 maybe_noreply crlf @{ _state = state::cmd_decr; };
-main := (add | replace | set | get | gets | delete | flush | version | cas | stats | incr | decr
- | stats_hash) >eof{ _state = state::eof; };
-
-prepush {
- prepush();
-}
-
-postpop {
- postpop();
-}
-
-}%%
-
-class memcache_ascii_parser : public ragel_parser_base {
- %% write data nofinal noprefix;
-public:
- enum class state {
- error,
- eof,
- cmd_set,
- cmd_cas,
- cmd_add,
- cmd_replace,
- cmd_get,
- cmd_gets,
- cmd_delete,
- cmd_flush_all,
- cmd_version,
- cmd_stats,
- cmd_stats_hash,
- cmd_incr,
- cmd_decr,
- };
- state _state;
- uint32_t _u32;
- uint64_t _u64;
- memcache::item_key _key;
- sstring _flags_str;
- uint32_t _expiration;
- uint32_t _size;
- sstring _size_str;
- uint32_t _size_left;
- uint64_t _version;
- sstring _blob;
- bool _noreply;
- std::vector _keys;
-public:
- void init() {
- init_base();
- _state = state::error;
- _keys.clear();
- %% write init;
- }
-
- char* parse(char* p, char* pe, char* eof) {
- sstring_builder::guard g(_builder, p, pe);
- auto str = [this, &g, &p] { g.mark_end(p); return get_str(); };
- %% write exec;
- if (_state != state::error) {
- return p;
- }
- if (p != pe) {
- p = pe;
- return p;
- }
- return nullptr;
- }
- bool eof() const {
- return _state == state::eof;
- }
-};
diff --git a/apps/memcached/memcache.cc b/apps/memcached/memcache.cc
deleted file mode 100644
index d9f1b1a72e..0000000000
--- a/apps/memcached/memcache.cc
+++ /dev/null
@@ -1,1405 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright 2014-2015 Cloudius Systems
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include "core/app-template.hh"
-#include "core/future-util.hh"
-#include "core/timer-set.hh"
-#include "core/shared_ptr.hh"
-#include "core/stream.hh"
-#include "core/memory.hh"
-#include "core/units.hh"
-#include "core/distributed.hh"
-#include "core/vector-data-sink.hh"
-#include "core/bitops.hh"
-#include "core/slab.hh"
-#include "core/align.hh"
-#include "net/api.hh"
-#include "net/packet-data-source.hh"
-#include "apps/memcached/ascii.hh"
-#include "memcached.hh"
-#include
-
-#define PLATFORM "seastar"
-#define VERSION "v1.0"
-#define VERSION_STRING PLATFORM " " VERSION
-
-using namespace net;
-
-namespace bi = boost::intrusive;
-
-namespace memcache {
-
-static constexpr double default_slab_growth_factor = 1.25;
-static constexpr uint64_t default_slab_page_size = 1UL*MB;
-static constexpr uint64_t default_per_cpu_slab_size = 0UL; // zero means reclaimer is enabled.
-static __thread slab_allocator- * slab;
-
-template
-using optional = boost::optional;
-
-struct expiration {
- static constexpr uint32_t seconds_in_a_month = 60U * 60 * 24 * 30;
- uint32_t _time;
-
- expiration() : _time(0U) {}
-
- expiration(uint32_t seconds) {
- if (seconds == 0U) {
- _time = 0U; // means never expire.
- } else if (seconds <= seconds_in_a_month) {
- _time = seconds + time(0); // from delta
- } else {
- _time = seconds; // from real time
- }
- }
-
- bool ever_expires() {
- return _time;
- }
-
- clock_type::time_point to_time_point() {
- return clock_type::time_point(std::chrono::seconds(_time));
- }
-};
-
-class item : public slab_item_base {
-public:
- using version_type = uint64_t;
- using time_point = clock_type::time_point;
- using duration = clock_type::duration;
- static constexpr uint8_t field_alignment = alignof(void*);
-private:
- using hook_type = bi::unordered_set_member_hook<>;
- // TODO: align shared data to cache line boundary
- version_type _version;
- hook_type _cache_link;
- bi::list_member_hook<> _timer_link;
- size_t _key_hash;
- expiration _expiry;
- uint32_t _value_size;
- uint32_t _slab_page_index;
- uint16_t _ref_count;
- uint8_t _key_size;
- uint8_t _ascii_prefix_size;
- char _data[]; // layout: data=key, (data+key_size)=ascii_prefix, (data+key_size+ascii_prefix_size)=value.
- friend class cache;
-public:
- item(uint32_t slab_page_index, item_key&& key, sstring&& ascii_prefix,
- sstring&& value, expiration expiry, version_type version = 1)
- : _version(version)
- , _key_hash(key.hash())
- , _expiry(expiry)
- , _value_size(value.size())
- , _slab_page_index(slab_page_index)
- , _ref_count(0U)
- , _key_size(key.key().size())
- , _ascii_prefix_size(ascii_prefix.size())
- {
- assert(_key_size <= std::numeric_limits::max());
- assert(_ascii_prefix_size <= std::numeric_limits::max());
- // storing key
- memcpy(_data, key.key().c_str(), _key_size);
- // storing ascii_prefix
- memcpy(_data + align_up(_key_size, field_alignment), ascii_prefix.c_str(), _ascii_prefix_size);
- // storing value
- memcpy(_data + align_up(_key_size, field_alignment) + align_up(_ascii_prefix_size, field_alignment),
- value.c_str(), _value_size);
- }
-
- item(const item&) = delete;
- item(item&&) = delete;
-
- clock_type::time_point get_timeout() {
- return _expiry.to_time_point();
- }
-
- version_type version() {
- return _version;
- }
-
- const std::experimental::string_view key() const {
- return std::experimental::string_view(_data, _key_size);
- }
-
- const std::experimental::string_view ascii_prefix() const {
- const char *p = _data + align_up(_key_size, field_alignment);
- return std::experimental::string_view(p, _ascii_prefix_size);
- }
-
- const std::experimental::string_view value() const {
- const char *p = _data + align_up(_key_size, field_alignment) +
- align_up(_ascii_prefix_size, field_alignment);
- return std::experimental::string_view(p, _value_size);
- }
-
- size_t key_size() const {
- return _key_size;
- }
-
- size_t ascii_prefix_size() const {
- return _ascii_prefix_size;
- }
-
- size_t value_size() const {
- return _value_size;
- }
-
- optional data_as_integral() {
- auto str = value().data();
- if (str[0] == '-') {
- return {};
- }
-
- auto len = _value_size;
-
- // Strip trailing space
- while (len && str[len - 1] == ' ') {
- len--;
- }
-
- try {
- return {boost::lexical_cast(str, len)};
- } catch (const boost::bad_lexical_cast& e) {
- return {};
- }
- }
-
- // needed by timer_set
- bool cancel() {
- return false;
- }
-
- // Methods required by slab allocator.
- uint32_t get_slab_page_index() const {
- return _slab_page_index;
- }
- bool is_unlocked() const {
- return _ref_count == 1;
- }
-
- friend bool operator==(const item &a, const item &b) {
- return (a._key_hash == b._key_hash) &&
- (a._key_size == b._key_size) &&
- (memcmp(a._data, b._data, a._key_size) == 0);
- }
-
- friend std::size_t hash_value(const item &i) {
- return i._key_hash;
- }
-
- friend inline void intrusive_ptr_add_ref(item* it) {
- assert(it->_ref_count >= 0);
- ++it->_ref_count;
- if (it->_ref_count == 2) {
- slab->lock_item(it);
- }
- }
-
- friend inline void intrusive_ptr_release(item* it) {
- --it->_ref_count;
- if (it->_ref_count == 1) {
- slab->unlock_item(it);
- } else if (it->_ref_count == 0) {
- slab->free(it);
- }
- assert(it->_ref_count >= 0);
- }
-
- friend class item_key_cmp;
-};
-
-struct item_key_cmp
-{
-private:
- bool compare(const item_key& key, const item& it) const {
- return (it._key_hash == key.hash()) &&
- (it._key_size == key.key().size()) &&
- (memcmp(it._data, key.key().c_str(), it._key_size) == 0);
- }
-public:
- bool operator()(const item_key& key, const item& it) const {
- return compare(key, it);
- }
-
- bool operator()(const item& it, const item_key& key) const {
- return compare(key, it);
- }
-};
-
-using item_ptr = foreign_ptr>;
-
-struct cache_stats {
- size_t _get_hits {};
- size_t _get_misses {};
- size_t _set_adds {};
- size_t _set_replaces {};
- size_t _cas_hits {};
- size_t _cas_misses {};
- size_t _cas_badval {};
- size_t _delete_misses {};
- size_t _delete_hits {};
- size_t _incr_misses {};
- size_t _incr_hits {};
- size_t _decr_misses {};
- size_t _decr_hits {};
- size_t _expired {};
- size_t _evicted {};
- size_t _bytes {};
- size_t _resize_failure {};
- size_t _size {};
- size_t _reclaims{};
-
- void operator+=(const cache_stats& o) {
- _get_hits += o._get_hits;
- _get_misses += o._get_misses;
- _set_adds += o._set_adds;
- _set_replaces += o._set_replaces;
- _cas_hits += o._cas_hits;
- _cas_misses += o._cas_misses;
- _cas_badval += o._cas_badval;
- _delete_misses += o._delete_misses;
- _delete_hits += o._delete_hits;
- _incr_misses += o._incr_misses;
- _incr_hits += o._incr_hits;
- _decr_misses += o._decr_misses;
- _decr_hits += o._decr_hits;
- _expired += o._expired;
- _evicted += o._evicted;
- _bytes += o._bytes;
- _resize_failure += o._resize_failure;
- _size += o._size;
- _reclaims += o._reclaims;
- }
-};
-
-enum class cas_result {
- not_found, stored, bad_version
-};
-
-struct remote_origin_tag {
- template
- static inline
- T move_if_local(T& ref) {
- return ref;
- }
-};
-
-struct local_origin_tag {
- template
- static inline
- T move_if_local(T& ref) {
- return std::move(ref);
- }
-};
-
-struct item_insertion_data {
- item_key key;
- sstring ascii_prefix;
- sstring data;
- expiration expiry;
-};
-
-class cache {
-private:
- using cache_type = bi::unordered_set
- ,
- bi::power_2_buckets,
- bi::constant_time_size>;
- using cache_iterator = typename cache_type::iterator;
- static constexpr size_t initial_bucket_count = 1 << 10;
- static constexpr float load_factor = 0.75f;
- size_t _resize_up_threshold = load_factor * initial_bucket_count;
- cache_type::bucket_type* _buckets;
- cache_type _cache;
- seastar::timer_set
- _alive;
- timer<> _timer;
- cache_stats _stats;
- timer<> _flush_timer;
-private:
- size_t item_size(item& item_ref) {
- constexpr size_t field_alignment = alignof(void*);
- return sizeof(item) +
- align_up(item_ref.key_size(), field_alignment) +
- align_up(item_ref.ascii_prefix_size(), field_alignment) +
- item_ref.value_size();
- }
-
- size_t item_size(item_insertion_data& insertion) {
- constexpr size_t field_alignment = alignof(void*);
- auto size = sizeof(item) +
- align_up(insertion.key.key().size(), field_alignment) +
- align_up(insertion.ascii_prefix.size(), field_alignment) +
- insertion.data.size();
-#ifdef __DEBUG__
- static bool print_item_footprint = true;
- if (print_item_footprint) {
- print_item_footprint = false;
- std::cout << __FUNCTION__ << ": " << size << "\n";
- std::cout << "sizeof(item) " << sizeof(item) << "\n";
- std::cout << "key.size " << insertion.key.key().size() << "\n";
- std::cout << "value.size " << insertion.data.size() << "\n";
- std::cout << "ascii_prefix.size " << insertion.ascii_prefix.size() << "\n";
- }
-#endif
- return size;
- }
-
- template
- void erase(item& item_ref) {
- if (IsInCache) {
- _cache.erase(_cache.iterator_to(item_ref));
- }
- if (IsInTimerList) {
- if (item_ref._expiry.ever_expires()) {
- _alive.remove(item_ref);
- }
- }
- _stats._bytes -= item_size(item_ref);
- if (Release) {
- // memory used by item shouldn't be freed when slab is replacing it with another item.
- intrusive_ptr_release(&item_ref);
- }
- }
-
- void expire() {
- auto exp = _alive.expire(clock_type::now());
- while (!exp.empty()) {
- auto item = &*exp.begin();
- exp.pop_front();
- erase(*item);
- _stats._expired++;
- }
- _timer.arm(_alive.get_next_timeout());
- }
-
- inline
- cache_iterator find(const item_key& key) {
- return _cache.find(key, std::hash(), item_key_cmp());
- }
-
- template
- inline
- cache_iterator add_overriding(cache_iterator i, item_insertion_data& insertion) {
- auto& old_item = *i;
- uint64_t old_item_version = old_item._version;
-
- erase(old_item);
-
- size_t size = item_size(insertion);
- auto new_item = slab->create(size, Origin::move_if_local(insertion.key), Origin::move_if_local(insertion.ascii_prefix),
- Origin::move_if_local(insertion.data), insertion.expiry, old_item_version + 1);
- intrusive_ptr_add_ref(new_item);
-
- auto insert_result = _cache.insert(*new_item);
- assert(insert_result.second);
- if (insertion.expiry.ever_expires() && _alive.insert(*new_item)) {
- _timer.rearm(new_item->get_timeout());
- }
- _stats._bytes += size;
- return insert_result.first;
- }
-
- template
- inline
- void add_new(item_insertion_data& insertion) {
- size_t size = item_size(insertion);
- auto new_item = slab->create(size, Origin::move_if_local(insertion.key), Origin::move_if_local(insertion.ascii_prefix),
- Origin::move_if_local(insertion.data), insertion.expiry);
- intrusive_ptr_add_ref(new_item);
- auto& item_ref = *new_item;
- _cache.insert(item_ref);
- if (insertion.expiry.ever_expires() && _alive.insert(item_ref)) {
- _timer.rearm(item_ref.get_timeout());
- }
- _stats._bytes += size;
- maybe_rehash();
- }
-
- void maybe_rehash() {
- if (_cache.size() >= _resize_up_threshold) {
- auto new_size = _cache.bucket_count() * 2;
- auto old_buckets = _buckets;
- try {
- _buckets = new cache_type::bucket_type[new_size];
- } catch (const std::bad_alloc& e) {
- _stats._resize_failure++;
- return;
- }
- _cache.rehash(typename cache_type::bucket_traits(_buckets, new_size));
- delete[] old_buckets;
- _resize_up_threshold = _cache.bucket_count() * load_factor;
- }
- }
-public:
- cache(uint64_t per_cpu_slab_size, uint64_t slab_page_size)
- : _buckets(new cache_type::bucket_type[initial_bucket_count])
- , _cache(cache_type::bucket_traits(_buckets, initial_bucket_count))
- {
- _timer.set_callback([this] { expire(); });
- _flush_timer.set_callback([this] { flush_all(); });
-
- // initialize per-thread slab allocator.
- slab = new slab_allocator
- (default_slab_growth_factor, per_cpu_slab_size, slab_page_size,
- [this](item& item_ref) { erase(item_ref); _stats._evicted++; });
-#ifdef __DEBUG__
- static bool print_slab_classes = true;
- if (print_slab_classes) {
- print_slab_classes = false;
- slab->print_slab_classes();
- }
-#endif
- }
-
- ~cache() {
- flush_all();
- }
-
- void flush_all() {
- _flush_timer.cancel();
- _cache.erase_and_dispose(_cache.begin(), _cache.end(), [this] (item* it) {
- erase(*it);
- });
- }
-
- void flush_at(clock_type::time_point time_point) {
- _flush_timer.rearm(time_point);
- }
-
- template
- bool set(item_insertion_data& insertion) {
- auto i = find(insertion.key);
- if (i != _cache.end()) {
- add_overriding(i, insertion);
- _stats._set_replaces++;
- return true;
- } else {
- add_new(insertion);
- _stats._set_adds++;
- return false;
- }
- }
-
- template
- bool add(item_insertion_data& insertion) {
- auto i = find(insertion.key);
- if (i != _cache.end()) {
- return false;
- }
-
- _stats._set_adds++;
- add_new(insertion);
- return true;
- }
-
- template
- bool replace(item_insertion_data& insertion) {
- auto i = find(insertion.key);
- if (i == _cache.end()) {
- return false;
- }
-
- _stats._set_replaces++;
- add_overriding(i, insertion);
- return true;
- }
-
- bool remove(const item_key& key) {
- auto i = find(key);
- if (i == _cache.end()) {
- _stats._delete_misses++;
- return false;
- }
- _stats._delete_hits++;
- auto& item_ref = *i;
- erase(item_ref);
- return true;
- }
-
- item_ptr get(const item_key& key) {
- auto i = find(key);
- if (i == _cache.end()) {
- _stats._get_misses++;
- return nullptr;
- }
- _stats._get_hits++;
- auto& item_ref = *i;
- return item_ptr(&item_ref);
- }
-
- template
- cas_result cas(item_insertion_data& insertion, item::version_type version) {
- auto i = find(insertion.key);
- if (i == _cache.end()) {
- _stats._cas_misses++;
- return cas_result::not_found;
- }
- auto& item_ref = *i;
- if (item_ref._version != version) {
- _stats._cas_badval++;
- return cas_result::bad_version;
- }
- _stats._cas_hits++;
- add_overriding(i, insertion);
- return cas_result::stored;
- }
-
- size_t size() {
- return _cache.size();
- }
-
- size_t bucket_count() {
- return _cache.bucket_count();
- }
-
- cache_stats stats() {
- _stats._size = size();
- return _stats;
- }
-
- template
- std::pair incr(item_key& key, uint64_t delta) {
- auto i = find(key);
- if (i == _cache.end()) {
- _stats._incr_misses++;
- return {item_ptr{}, false};
- }
- auto& item_ref = *i;
- _stats._incr_hits++;
- auto value = item_ref.data_as_integral();
- if (!value) {
- return {boost::intrusive_ptr
- (&item_ref), false};
- }
- item_insertion_data insertion {
- .key = Origin::move_if_local(key),
- .ascii_prefix = sstring(item_ref.ascii_prefix().data(), item_ref.ascii_prefix_size()),
- .data = to_sstring(*value + delta),
- .expiry = item_ref._expiry
- };
- i = add_overriding(i, insertion);
- return {boost::intrusive_ptr
- (&*i), true};
- }
-
- template
- std::pair decr(item_key& key, uint64_t delta) {
- auto i = find(key);
- if (i == _cache.end()) {
- _stats._decr_misses++;
- return {item_ptr{}, false};
- }
- auto& item_ref = *i;
- _stats._decr_hits++;
- auto value = item_ref.data_as_integral();
- if (!value) {
- return {boost::intrusive_ptr
- (&item_ref), false};
- }
- item_insertion_data insertion {
- .key = Origin::move_if_local(key),
- .ascii_prefix = sstring(item_ref.ascii_prefix().data(), item_ref.ascii_prefix_size()),
- .data = to_sstring(*value - std::min(*value, delta)),
- .expiry = item_ref._expiry
- };
- i = add_overriding(i, insertion);
- return {boost::intrusive_ptr
- (&*i), true};
- }
-
- std::pair>> print_hash_stats() {
- static constexpr unsigned bits = sizeof(size_t) * 8;
- size_t histo[bits + 1] {};
- size_t max_size = 0;
- unsigned max_bucket = 0;
-
- for (size_t i = 0; i < _cache.bucket_count(); i++) {
- size_t size = _cache.bucket_size(i);
- unsigned bucket;
- if (size == 0) {
- bucket = 0;
- } else {
- bucket = bits - count_leading_zeros(size);
- }
- max_bucket = std::max(max_bucket, bucket);
- max_size = std::max(max_size, size);
- histo[bucket]++;
- }
-
- std::stringstream ss;
-
- ss << "size: " << _cache.size() << "\n";
- ss << "buckets: " << _cache.bucket_count() << "\n";
- ss << "load: " << to_sstring_sprintf((double)_cache.size() / _cache.bucket_count(), "%.2lf") << "\n";
- ss << "max bucket occupancy: " << max_size << "\n";
- ss << "bucket occupancy histogram:\n";
-
- for (unsigned i = 0; i < (max_bucket + 2); i++) {
- ss << " ";
- if (i == 0) {
- ss << "0: ";
- } else if (i == 1) {
- ss << "1: ";
- } else {
- ss << (1 << (i - 1)) << "+: ";
- }
- ss << histo[i] << "\n";
- }
- return {engine().cpu_id(), make_foreign(make_lw_shared(ss.str()))};
- }
-
- future<> stop() { return make_ready_future<>(); }
-};
-
-class sharded_cache {
-private:
- distributed& _peers;
-
- inline
- unsigned get_cpu(const item_key& key) {
- return std::hash()(key) % smp::count;
- }
-public:
- sharded_cache(distributed& peers) : _peers(peers) {}
-
- future<> flush_all() {
- return _peers.invoke_on_all(&cache::flush_all);
- }
-
- future<> flush_at(clock_type::time_point time_point) {
- return _peers.invoke_on_all(&cache::flush_at, time_point);
- }
-
- // The caller must keep @insertion live until the resulting future resolves.
- future set(item_insertion_data& insertion) {
- auto cpu = get_cpu(insertion.key);
- if (engine().cpu_id() == cpu) {
- return make_ready_future(_peers.local().set(insertion));
- }
- return _peers.invoke_on(cpu, &cache::set, std::ref(insertion));
- }
-
- // The caller must keep @insertion live until the resulting future resolves.
- future add(item_insertion_data& insertion) {
- auto cpu = get_cpu(insertion.key);
- if (engine().cpu_id() == cpu) {
- return make_ready_future(_peers.local().add(insertion));
- }
- return _peers.invoke_on(cpu, &cache::add, std::ref(insertion));
- }
-
- // The caller must keep @insertion live until the resulting future resolves.
- future replace(item_insertion_data& insertion) {
- auto cpu = get_cpu(insertion.key);
- if (engine().cpu_id() == cpu) {
- return make_ready_future(_peers.local().replace(insertion));
- }
- return _peers.invoke_on(cpu, &cache::replace, std::ref(insertion));
- }
-
- // The caller must keep @key live until the resulting future resolves.
- future remove(const item_key& key) {
- auto cpu = get_cpu(key);
- return _peers.invoke_on(cpu, &cache::remove, std::ref(key));
- }
-
- // The caller must keep @key live until the resulting future resolves.
- future get(const item_key& key) {
- auto cpu = get_cpu(key);
- return _peers.invoke_on(cpu, &cache::get, std::ref(key));
- }
-
- // The caller must keep @insertion live until the resulting future resolves.
- future cas(item_insertion_data& insertion, item::version_type version) {
- auto cpu = get_cpu(insertion.key);
- if (engine().cpu_id() == cpu) {
- return make_ready_future(_peers.local().cas(insertion, version));
- }
- return _peers.invoke_on(cpu, &cache::cas, std::ref(insertion), std::move(version));
- }
-
- future stats() {
- return _peers.map_reduce(adder(), &cache::stats);
- }
-
- // The caller must keep @key live until the resulting future resolves.
- future> incr(item_key& key, uint64_t delta) {
- auto cpu = get_cpu(key);
- if (engine().cpu_id() == cpu) {
- return make_ready_future>(
- _peers.local().incr(key, delta));
- }
- return _peers.invoke_on(cpu, &cache::incr, std::ref(key), std::move(delta));
- }
-
- // The caller must keep @key live until the resulting future resolves.
- future> decr(item_key& key, uint64_t delta) {
- auto cpu = get_cpu(key);
- if (engine().cpu_id() == cpu) {
- return make_ready_future>(
- _peers.local().decr(key, delta));
- }
- return _peers.invoke_on(cpu, &cache::decr, std::ref(key), std::move(delta));
- }
-
- future<> print_hash_stats(output_stream& out) {
- return _peers.map_reduce([&out] (std::pair>> data) mutable {
- return out.write("=== CPU " + std::to_string(data.first) + " ===\r\n")
- .then([&out, str = std::move(data.second)] {
- return out.write(*str);
- });
- }, &cache::print_hash_stats);
- }
-};
-
-struct system_stats {
- uint32_t _curr_connections {};
- uint32_t _total_connections {};
- uint64_t _cmd_get {};
- uint64_t _cmd_set {};
- uint64_t _cmd_flush {};
- clock_type::time_point _start_time;
-public:
- system_stats() {
- _start_time = clock_type::time_point::max();
- }
- system_stats(clock_type::time_point start_time)
- : _start_time(start_time) {
- }
- system_stats self() {
- return *this;
- }
- void operator+=(const system_stats& other) {
- _curr_connections += other._curr_connections;
- _total_connections += other._total_connections;
- _cmd_get += other._cmd_get;
- _cmd_set += other._cmd_set;
- _cmd_flush += other._cmd_flush;
- _start_time = std::min(_start_time, other._start_time);
- }
- future<> stop() { return make_ready_future<>(); }
-};
-
-class ascii_protocol {
-private:
- using this_type = ascii_protocol;
- sharded_cache& _cache;
- distributed& _system_stats;
- memcache_ascii_parser _parser;
- item_key _item_key;
- item_insertion_data _insertion;
- std::vector _items;
-private:
- static constexpr const char *msg_crlf = "\r\n";
- static constexpr const char *msg_error = "ERROR\r\n";
- static constexpr const char *msg_stored = "STORED\r\n";
- static constexpr const char *msg_not_stored = "NOT_STORED\r\n";
- static constexpr const char *msg_end = "END\r\n";
- static constexpr const char *msg_value = "VALUE ";
- static constexpr const char *msg_deleted = "DELETED\r\n";
- static constexpr const char *msg_not_found = "NOT_FOUND\r\n";
- static constexpr const char *msg_ok = "OK\r\n";
- static constexpr const char *msg_version = "VERSION " VERSION_STRING "\r\n";
- static constexpr const char *msg_exists = "EXISTS\r\n";
- static constexpr const char *msg_stat = "STAT ";
- static constexpr const char *msg_out_of_memory = "SERVER_ERROR Out of memory allocating new item\r\n";
- static constexpr const char *msg_error_non_numeric_value = "CLIENT_ERROR cannot increment or decrement non-numeric value\r\n";
-private:
- template
- static void append_item(scattered_message& msg, item_ptr item) {
- if (!item) {
- return;
- }
-
- msg.append_static("VALUE ");
- msg.append_static(item->key());
- msg.append_static(item->ascii_prefix());
-
- if (WithVersion) {
- msg.append_static(" ");
- msg.append(to_sstring(item->version()));
- }
-
- msg.append_static(msg_crlf);
- msg.append_static(item->value());
- msg.append_static(msg_crlf);
- msg.on_delete([item = std::move(item)] {});
- }
-
- template
- future<> handle_get(output_stream& out) {
- _system_stats.local()._cmd_get++;
- if (_parser._keys.size() == 1) {
- return _cache.get(_parser._keys[0]).then([&out] (auto item) -> future<> {
- scattered_message msg;
- this_type::append_item(msg, std::move(item));
- msg.append_static(msg_end);
- return out.write(std::move(msg));
- });
- } else {
- _items.clear();
- return parallel_for_each(_parser._keys.begin(), _parser._keys.end(), [this] (const auto& key) {
- return _cache.get(key).then([this] (auto item) {
- _items.emplace_back(std::move(item));
- });
- }).then([this, &out] () {
- scattered_message msg;
- for (auto& item : _items) {
- append_item(msg, std::move(item));
- }
- msg.append_static(msg_end);
- return out.write(std::move(msg));
- });
- }
- }
-
- template
- static future<> print_stat(output_stream& out, const char* key, Value value) {
- return out.write(msg_stat)
- .then([&out, key] { return out.write(key); })
- .then([&out] { return out.write(" "); })
- .then([&out, value] { return out.write(to_sstring(value)); })
- .then([&out] { return out.write(msg_crlf); });
- }
-
- future<> print_stats(output_stream& out) {
- return _cache.stats().then([this, &out] (auto stats) {
- return _system_stats.map_reduce(adder(), &system_stats::self)
- .then([this, &out, all_cache_stats = std::move(stats)] (auto all_system_stats) -> future<> {
- auto now = clock_type::now();
- auto total_items = all_cache_stats._set_replaces + all_cache_stats._set_adds
- + all_cache_stats._cas_hits;
- return print_stat(out, "pid", getpid())
- .then([this, now, &out, uptime = now - all_system_stats._start_time] {
- return print_stat(out, "uptime",
- std::chrono::duration_cast(uptime).count());
- }).then([this, now, &out] {
- return print_stat(out, "time",
- std::chrono::duration_cast(now.time_since_epoch()).count());
- }).then([this, &out] {
- return print_stat(out, "version", VERSION_STRING);
- }).then([this, &out] {
- return print_stat(out, "pointer_size", sizeof(void*)*8);
- }).then([this, &out, v = all_system_stats._curr_connections] {
- return print_stat(out, "curr_connections", v);
- }).then([this, &out, v = all_system_stats._total_connections] {
- return print_stat(out, "total_connections", v);
- }).then([this, &out, v = all_system_stats._curr_connections] {
- return print_stat(out, "connection_structures", v);
- }).then([this, &out, v = all_system_stats._cmd_get] {
- return print_stat(out, "cmd_get", v);
- }).then([this, &out, v = all_system_stats._cmd_set] {
- return print_stat(out, "cmd_set", v);
- }).then([this, &out, v = all_system_stats._cmd_flush] {
- return print_stat(out, "cmd_flush", v);
- }).then([this, &out] {
- return print_stat(out, "cmd_touch", 0);
- }).then([this, &out, v = all_cache_stats._get_hits] {
- return print_stat(out, "get_hits", v);
- }).then([this, &out, v = all_cache_stats._get_misses] {
- return print_stat(out, "get_misses", v);
- }).then([this, &out, v = all_cache_stats._delete_misses] {
- return print_stat(out, "delete_misses", v);
- }).then([this, &out, v = all_cache_stats._delete_hits] {
- return print_stat(out, "delete_hits", v);
- }).then([this, &out, v = all_cache_stats._incr_misses] {
- return print_stat(out, "incr_misses", v);
- }).then([this, &out, v = all_cache_stats._incr_hits] {
- return print_stat(out, "incr_hits", v);
- }).then([this, &out, v = all_cache_stats._decr_misses] {
- return print_stat(out, "decr_misses", v);
- }).then([this, &out, v = all_cache_stats._decr_hits] {
- return print_stat(out, "decr_hits", v);
- }).then([this, &out, v = all_cache_stats._cas_misses] {
- return print_stat(out, "cas_misses", v);
- }).then([this, &out, v = all_cache_stats._cas_hits] {
- return print_stat(out, "cas_hits", v);
- }).then([this, &out, v = all_cache_stats._cas_badval] {
- return print_stat(out, "cas_badval", v);
- }).then([this, &out] {
- return print_stat(out, "touch_hits", 0);
- }).then([this, &out] {
- return print_stat(out, "touch_misses", 0);
- }).then([this, &out] {
- return print_stat(out, "auth_cmds", 0);
- }).then([this, &out] {
- return print_stat(out, "auth_errors", 0);
- }).then([this, &out] {
- return print_stat(out, "threads", smp::count);
- }).then([this, &out, v = all_cache_stats._size] {
- return print_stat(out, "curr_items", v);
- }).then([this, &out, v = total_items] {
- return print_stat(out, "total_items", v);
- }).then([this, &out, v = all_cache_stats._expired] {
- return print_stat(out, "seastar.expired", v);
- }).then([this, &out, v = all_cache_stats._resize_failure] {
- return print_stat(out, "seastar.resize_failure", v);
- }).then([this, &out, v = all_cache_stats._evicted] {
- return print_stat(out, "evictions", v);
- }).then([this, &out, v = all_cache_stats._bytes] {
- return print_stat(out, "bytes", v);
- }).then([&out] {
- return out.write(msg_end);
- });
- });
- });
- }
-public:
- ascii_protocol(sharded_cache& cache, distributed& system_stats)
- : _cache(cache)
- , _system_stats(system_stats)
- {}
-
- void prepare_insertion() {
- _insertion = item_insertion_data{
- .key = std::move(_parser._key),
- .ascii_prefix = make_sstring(" ", _parser._flags_str, " ", _parser._size_str),
- .data = std::move(_parser._blob),
- .expiry = expiration(_parser._expiration)
- };
- }
-
- future<> handle(input_stream& in, output_stream& out) {
- _parser.init();
- return in.consume(_parser).then([this, &out] () -> future<> {
- switch (_parser._state) {
- case memcache_ascii_parser::state::eof:
- return make_ready_future<>();
-
- case memcache_ascii_parser::state::error:
- return out.write(msg_error);
-
- case memcache_ascii_parser::state::cmd_set:
- {
- _system_stats.local()._cmd_set++;
- prepare_insertion();
- auto f = _cache.set(_insertion);
- if (_parser._noreply) {
- return std::move(f).discard_result();
- }
- return std::move(f).then([&out] (...) {
- return out.write(msg_stored);
- });
- }
-
- case memcache_ascii_parser::state::cmd_cas:
- {
- _system_stats.local()._cmd_set++;
- prepare_insertion();
- auto f = _cache.cas(_insertion, _parser._version);
- if (_parser._noreply) {
- return std::move(f).discard_result();
- }
- return std::move(f).then([&out] (auto result) {
- switch (result) {
- case cas_result::stored:
- return out.write(msg_stored);
- case cas_result::not_found:
- return out.write(msg_not_found);
- case cas_result::bad_version:
- return out.write(msg_exists);
- default:
- std::abort();
- }
- });
- }
-
- case memcache_ascii_parser::state::cmd_add:
- {
- _system_stats.local()._cmd_set++;
- prepare_insertion();
- auto f = _cache.add(_insertion);
- if (_parser._noreply) {
- return std::move(f).discard_result();
- }
- return std::move(f).then([&out] (bool added) {
- return out.write(added ? msg_stored : msg_not_stored);
- });
- }
-
- case memcache_ascii_parser::state::cmd_replace:
- {
- _system_stats.local()._cmd_set++;
- prepare_insertion();
- auto f = _cache.replace(_insertion);
- if (_parser._noreply) {
- return std::move(f).discard_result();
- }
- return std::move(f).then([&out] (auto replaced) {
- return out.write(replaced ? msg_stored : msg_not_stored);
- });
- }
-
- case memcache_ascii_parser::state::cmd_get:
- return handle_get(out);
-
- case memcache_ascii_parser::state::cmd_gets:
- return handle_get(out);
-
- case memcache_ascii_parser::state::cmd_delete:
- {
- auto f = _cache.remove(_parser._key);
- if (_parser._noreply) {
- return std::move(f).discard_result();
- }
- return std::move(f).then([&out] (bool removed) {
- return out.write(removed ? msg_deleted : msg_not_found);
- });
- }
-
- case memcache_ascii_parser::state::cmd_flush_all:
- {
- _system_stats.local()._cmd_flush++;
- if (_parser._expiration) {
- auto expiry = expiration(_parser._expiration);
- auto f = _cache.flush_at(expiry.to_time_point());
- if (_parser._noreply) {
- return f;
- }
- return std::move(f).then([&out] {
- return out.write(msg_ok);
- });
- } else {
- auto f = _cache.flush_all();
- if (_parser._noreply) {
- return f;
- }
- return std::move(f).then([&out] {
- return out.write(msg_ok);
- });
- }
- }
-
- case memcache_ascii_parser::state::cmd_version:
- return out.write(msg_version);
-
- case memcache_ascii_parser::state::cmd_stats:
- return print_stats(out);
-
- case memcache_ascii_parser::state::cmd_stats_hash:
- return _cache.print_hash_stats(out);
-
- case memcache_ascii_parser::state::cmd_incr:
- {
- auto f = _cache.incr(_parser._key, _parser._u64);
- if (_parser._noreply) {
- return std::move(f).discard_result();
- }
- return std::move(f).then([&out] (auto result) {
- auto item = std::move(result.first);
- if (!item) {
- return out.write(msg_not_found);
- }
- auto incremented = result.second;
- if (!incremented) {
- return out.write(msg_error_non_numeric_value);
- }
- return out.write(item->value().data(), item->value_size()).then([&out] {
- return out.write(msg_crlf);
- });
- });
- }
-
- case memcache_ascii_parser::state::cmd_decr:
- {
- auto f = _cache.decr(_parser._key, _parser._u64);
- if (_parser._noreply) {
- return std::move(f).discard_result();
- }
- return std::move(f).then([&out] (auto result) {
- auto item = std::move(result.first);
- if (!item) {
- return out.write(msg_not_found);
- }
- auto decremented = result.second;
- if (!decremented) {
- return out.write(msg_error_non_numeric_value);
- }
- return out.write(item->value().data(), item->value_size()).then([&out] {
- return out.write(msg_crlf);
- });
- });
- }
- };
- std::abort();
- }).then_wrapped([this, &out] (auto&& f) -> future<> {
- // FIXME: then_wrapped() being scheduled even though no exception was triggered has a
- // performance cost of about 2.6%. Not using it means maintainability penalty.
- try {
- f.get();
- } catch (std::bad_alloc& e) {
- if (_parser._noreply) {
- return make_ready_future<>();
- }
- return out.write(msg_out_of_memory);
- }
- return make_ready_future<>();
- });
- };
-};
-
-class udp_server {
-public:
- static const size_t default_max_datagram_size = 1400;
-private:
- sharded_cache& _cache;
- distributed& _system_stats;
- udp_channel _chan;
- uint16_t _port;
- size_t _max_datagram_size = default_max_datagram_size;
-
- struct header {
- packed _request_id;
- packed _sequence_number;
- packed _n;
- packed _reserved;
-
- template
- auto adjust_endianness(Adjuster a) {
- return a(_request_id, _sequence_number, _n);
- }
- } __attribute__((packed));
-
- struct connection {
- ipv4_addr _src;
- uint16_t _request_id;
- input_stream _in;
- output_stream _out;
- std::vector _out_bufs;
- ascii_protocol _proto;
-
- connection(ipv4_addr src, uint16_t request_id, input_stream&& in, size_t out_size,
- sharded_cache& c, distributed& system_stats)
- : _src(src)
- , _request_id(request_id)
- , _in(std::move(in))
- , _out(output_stream(data_sink(std::make_unique(_out_bufs)), out_size, true))
- , _proto(c, system_stats)
- {}
-
- future<> respond(udp_channel& chan) {
- int i = 0;
- return do_for_each(_out_bufs.begin(), _out_bufs.end(), [this, i, &chan] (packet& p) mutable {
- header* out_hdr = p.prepend_header(0);
- out_hdr->_request_id = _request_id;
- out_hdr->_sequence_number = i++;
- out_hdr->_n = _out_bufs.size();
- *out_hdr = hton(*out_hdr);
- return chan.send(_src, std::move(p));
- });
- }
- };
-
-public:
- udp_server(sharded_cache& c, distributed& system_stats, uint16_t port = 11211)
- : _cache(c)
- , _system_stats(system_stats)
- , _port(port)
- {}
-
- void set_max_datagram_size(size_t max_datagram_size) {
- _max_datagram_size = max_datagram_size;
- }
-
- void start() {
- _chan = engine().net().make_udp_channel({_port});
- keep_doing([this] {
- return _chan.receive().then([this](udp_datagram dgram) {
- packet& p = dgram.get_data();
- if (p.len() < sizeof(header)) {
- // dropping invalid packet
- return make_ready_future<>();
- }
-
- header hdr = ntoh(*p.get_header());
- p.trim_front(sizeof(hdr));
-
- auto request_id = hdr._request_id;
- auto in = as_input_stream(std::move(p));
- auto conn = make_lw_shared(dgram.get_src(), request_id, std::move(in),
- _max_datagram_size - sizeof(header), _cache, _system_stats);
-
- if (hdr._n != 1 || hdr._sequence_number != 0) {
- return conn->_out.write("CLIENT_ERROR only single-datagram requests supported\r\n").then([this, conn] {
- return conn->_out.flush().then([this, conn] {
- return conn->respond(_chan).then([conn] {});
- });
- });
- }
-
- return conn->_proto.handle(conn->_in, conn->_out).then([this, conn]() mutable {
- return conn->_out.flush().then([this, conn] {
- return conn->respond(_chan).then([conn] {});
- });
- });
- });
- }).or_terminate();
- };
-
- future<> stop() { return make_ready_future<>(); }
-};
-
-class tcp_server {
-private:
- lw_shared_ptr _listener;
- sharded_cache& _cache;
- distributed& _system_stats;
- uint16_t _port;
- struct connection {
- connected_socket _socket;
- socket_address _addr;
- input_stream _in;
- output_stream _out;
- ascii_protocol _proto;
- distributed& _system_stats;
- connection(connected_socket&& socket, socket_address addr, sharded_cache& c, distributed& system_stats)
- : _socket(std::move(socket))
- , _addr(addr)
- , _in(_socket.input())
- , _out(_socket.output())
- , _proto(c, system_stats)
- , _system_stats(system_stats)
- {
- _system_stats.local()._curr_connections++;
- _system_stats.local()._total_connections++;
- }
- ~connection() {
- _system_stats.local()._curr_connections--;
- }
- };
-public:
- tcp_server(sharded_cache& cache, distributed& system_stats, uint16_t port = 11211)
- : _cache(cache)
- , _system_stats(system_stats)
- , _port(port)
- {}
-
- void start() {
- listen_options lo;
- lo.reuse_address = true;
- _listener = engine().listen(make_ipv4_address({_port}), lo);
- keep_doing([this] {
- return _listener->accept().then([this] (connected_socket fd, socket_address addr) mutable {
- auto conn = make_lw_shared(std::move(fd), addr, _cache, _system_stats);
- do_until([conn] { return conn->_in.eof(); }, [this, conn] {
- return conn->_proto.handle(conn->_in, conn->_out).then([conn] {
- return conn->_out.flush();
- });
- });
- });
- }).or_terminate();
- }
-
- future<> stop() { return make_ready_future<>(); }
-};
-
-class stats_printer {
-private:
- timer<> _timer;
- sharded_cache& _cache;
-public:
- stats_printer(sharded_cache& cache)
- : _cache(cache) {}
-
- void start() {
- _timer.set_callback([this] {
- _cache.stats().then([this] (auto stats) {
- auto gets_total = stats._get_hits + stats._get_misses;
- auto get_hit_rate = gets_total ? ((double)stats._get_hits * 100 / gets_total) : 0;
- auto sets_total = stats._set_adds + stats._set_replaces;
- auto set_replace_rate = sets_total ? ((double)stats._set_replaces * 100/ sets_total) : 0;
- std::cout << "items: " << stats._size << " "
- << std::setprecision(2) << std::fixed
- << "get: " << stats._get_hits << "/" << gets_total << " (" << get_hit_rate << "%) "
- << "set: " << stats._set_replaces << "/" << sets_total << " (" << set_replace_rate << "%)";
- std::cout << std::endl;
- });
- });
- _timer.arm_periodic(std::chrono::seconds(1));
- }
-
- future<> stop() { return make_ready_future<>(); }
-};
-
-} /* namespace memcache */
-
-int main(int ac, char** av) {
- distributed cache_peers;
- memcache::sharded_cache cache(cache_peers);
- distributed system_stats;
- distributed udp_server;
- distributed tcp_server;
- memcache::stats_printer stats(cache);
-
- namespace bpo = boost::program_options;
- app_template app;
- app.add_options()
- ("max-datagram-size", bpo::value()->default_value(memcache::udp_server::default_max_datagram_size),
- "Maximum size of UDP datagram")
- ("max-slab-size", bpo::value()->default_value(memcache::default_per_cpu_slab_size/MB),
- "Maximum memory to be used for items (value in megabytes) (reclaimer is disabled if set)")
- ("slab-page-size", bpo::value()->default_value(memcache::default_slab_page_size/MB),
- "Size of slab page (value in megabytes)")
- ("stats",
- "Print basic statistics periodically (every second)")
- ("port", bpo::value()->default_value(11211),
- "Specify UDP and TCP ports for memcached server to listen on")
- ;
-
- return app.run(ac, av, [&] {
- engine().at_exit([&] { return tcp_server.stop(); });
- engine().at_exit([&] { return udp_server.stop(); });
- engine().at_exit([&] { return cache_peers.stop(); });
- engine().at_exit([&] { return system_stats.stop(); });
-
- auto&& config = app.configuration();
- uint16_t port = config["port"].as();
- uint64_t per_cpu_slab_size = config["max-slab-size"].as() * MB;
- uint64_t slab_page_size = config["slab-page-size"].as() * MB;
- return cache_peers.start(std::move(per_cpu_slab_size), std::move(slab_page_size)).then([&system_stats] {
- return system_stats.start(clock_type::now());
- }).then([&] {
- std::cout << PLATFORM << " memcached " << VERSION << "\n";
- return make_ready_future<>();
- }).then([&, port] {
- return tcp_server.start(std::ref(cache), std::ref(system_stats), port);
- }).then([&tcp_server] {
- return tcp_server.invoke_on_all(&memcache::tcp_server::start);
- }).then([&, port] {
- if (engine().net().has_per_core_namespace()) {
- return udp_server.start(std::ref(cache), std::ref(system_stats), port);
- } else {
- return udp_server.start_single(std::ref(cache), std::ref(system_stats), port);
- }
- }).then([&] {
- return udp_server.invoke_on_all(&memcache::udp_server::set_max_datagram_size,
- (size_t)config["max-datagram-size"].as());
- }).then([&] {
- return udp_server.invoke_on_all(&memcache::udp_server::start);
- }).then([&stats, start_stats = config.count("stats")] {
- if (start_stats) {
- stats.start();
- }
- });
- });
-}
diff --git a/apps/memcached/memcached.hh b/apps/memcached/memcached.hh
deleted file mode 100644
index 54fa217fcc..0000000000
--- a/apps/memcached/memcached.hh
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-#ifndef _MEMCACHED_HH
-#define _MEMCACHED_HH
-
-#include "core/sstring.hh"
-
-namespace memcache {
-
-class item;
-class cache;
-
-class item_key {
-private:
- sstring _key;
- size_t _hash;
-public:
- item_key() = default;
- item_key(item_key&) = default;
- item_key(sstring key)
- : _key(key)
- , _hash(std::hash()(key))
- {}
- item_key(item_key&& other)
- : _key(std::move(other._key))
- , _hash(other._hash)
- {
- other._hash = 0;
- }
- size_t hash() const {
- return _hash;
- }
- const sstring& key() const {
- return _key;
- }
- bool operator==(const item_key& other) const {
- return other._hash == _hash && other._key == _key;
- }
- void operator=(item_key&& other) {
- _key = std::move(other._key);
- _hash = other._hash;
- other._hash = 0;
- }
-};
-
-}
-
-namespace std {
-
-template <>
-struct hash {
- size_t operator()(const memcache::item_key& key) {
- return key.hash();
- }
-};
-
-} /* namespace std */
-
-#endif
diff --git a/apps/seawreck/http_response_parser.rl b/apps/seawreck/http_response_parser.rl
deleted file mode 100644
index 832223eeee..0000000000
--- a/apps/seawreck/http_response_parser.rl
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2015 Cloudius Systems, Ltd.
- */
-
-#include "core/ragel.hh"
-#include
-#include
-
-struct http_response {
- sstring _version;
- std::unordered_map _headers;
-};
-
-%% machine http_response;
-
-%%{
-
-access _fsm_;
-
-action mark {
- g.mark_start(p);
-}
-
-action store_version {
- _rsp->_version = str();
-}
-
-action store_field_name {
- _field_name = str();
-}
-
-action store_value {
- _value = str();
-}
-
-action assign_field {
- _rsp->_headers[_field_name] = std::move(_value);
-}
-
-action extend_field {
- _rsp->_headers[_field_name] += sstring(" ") + std::move(_value);
-}
-
-action done {
- done = true;
- fbreak;
-}
-
-cr = '\r';
-lf = '\n';
-crlf = '\r\n';
-tchar = alpha | digit | '-' | '!' | '#' | '$' | '%' | '&' | '\'' | '*'
- | '+' | '.' | '^' | '_' | '`' | '|' | '~';
-
-sp = ' ';
-ht = '\t';
-
-sp_ht = sp | ht;
-
-http_version = 'HTTP/' (digit '.' digit) >mark %store_version;
-
-field = tchar+ >mark %store_field_name;
-value = any* >mark %store_value;
-start_line = http_version space digit digit digit space (any - cr - lf)* crlf;
-header_1st = (field sp_ht* ':' value :> crlf) %assign_field;
-header_cont = (sp_ht+ value sp_ht* crlf) %extend_field;
-header = header_1st header_cont*;
-main := start_line header* :> (crlf @done);
-
-}%%
-
-class http_response_parser : public ragel_parser_base {
- %% write data nofinal noprefix;
-public:
- enum class state {
- error,
- eof,
- done,
- };
- std::unique_ptr _rsp;
- sstring _field_name;
- sstring _value;
- state _state;
-public:
- void init() {
- init_base();
- _rsp.reset(new http_response());
- _state = state::eof;
- %% write init;
- }
- char* parse(char* p, char* pe, char* eof) {
- sstring_builder::guard g(_builder, p, pe);
- auto str = [this, &g, &p] { g.mark_end(p); return get_str(); };
- bool done = false;
- if (p != pe) {
- _state = state::error;
- }
- %% write exec;
- if (!done) {
- p = nullptr;
- } else {
- _state = state::done;
- }
- return p;
- }
- auto get_parsed_response() {
- return std::move(_rsp);
- }
- bool eof() const {
- return _state == state::eof;
- }
-};
diff --git a/apps/seawreck/seawreck.cc b/apps/seawreck/seawreck.cc
deleted file mode 100644
index 28ed128c3b..0000000000
--- a/apps/seawreck/seawreck.cc
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2015 Cloudius Systems, Ltd.
- */
-
-#include "apps/seawreck/http_response_parser.hh"
-#include "core/print.hh"
-#include "core/reactor.hh"
-#include "core/app-template.hh"
-#include "core/future-util.hh"
-#include "core/distributed.hh"
-#include "core/semaphore.hh"
-#include "core/future-util.hh"
-#include
-
-template
-void http_debug(const char* fmt, Args&&... args) {
-#if HTTP_DEBUG
- print(fmt, std::forward(args)...);
-#endif
-}
-
-class http_client {
-private:
- unsigned _duration;
- unsigned _conn_per_core;
- unsigned _reqs_per_conn;
- std::vector _sockets;
- semaphore _conn_connected{0};
- semaphore _conn_finished{0};
- timer<> _run_timer;
- bool _timer_based;
- bool _timer_done{false};
- uint64_t _total_reqs{0};
-public:
- http_client(unsigned duration, unsigned total_conn, unsigned reqs_per_conn)
- : _duration(duration)
- , _conn_per_core(total_conn / smp::count)
- , _reqs_per_conn(reqs_per_conn)
- , _run_timer([this] { _timer_done = true; })
- , _timer_based(reqs_per_conn == 0) {
- }
-
- class connection {
- private:
- connected_socket _fd;
- input_stream _read_buf;
- output_stream _write_buf;
- http_response_parser _parser;
- http_client* _http_client;
- uint64_t _nr_done{0};
- public:
- connection(connected_socket&& fd, http_client* client)
- : _fd(std::move(fd))
- , _read_buf(_fd.input())
- , _write_buf(_fd.output())
- , _http_client(client){
- }
-
- uint64_t nr_done() {
- return _nr_done;
- }
-
- future<> do_req() {
- return _write_buf.write("GET / HTTP/1.1\r\nHost: 127.0.0.1:10000\r\n\r\n").then([this] {
- return _write_buf.flush();
- }).then([this] {
- _parser.init();
- return _read_buf.consume(_parser).then([this] {
- // Read HTTP response header first
- if (_parser.eof()) {
- return make_ready_future<>();
- }
- auto _rsp = _parser.get_parsed_response();
- auto it = _rsp->_headers.find("Content-Length");
- if (it == _rsp->_headers.end()) {
- print("Error: HTTP response does not contain: Content-Length\n");
- return make_ready_future<>();
- }
- auto content_len = std::stoi(it->second);
- http_debug("Content-Length = %d\n", content_len);
- // Read HTTP response body
- return _read_buf.read_exactly(content_len).then([this] (temporary_buffer buf) {
- _nr_done++;
- http_debug("%s\n", buf.get());
- if (_http_client->done(_nr_done)) {
- return make_ready_future();
- } else {
- return do_req();
- }
- });
- });
- });
- }
- };
-
- future total_reqs() {
- print("Requests on cpu %2d: %ld\n", engine().cpu_id(), _total_reqs);
- return make_ready_future(_total_reqs);
- }
-
- bool done(uint64_t nr_done) {
- if (_timer_based) {
- return _timer_done;
- } else {
- return nr_done >= _reqs_per_conn;
- }
- }
-
- future<> connect(ipv4_addr server_addr) {
- // Establish all the TCP connections first
- for (unsigned i = 0; i < _conn_per_core; i++) {
- engine().net().connect(make_ipv4_address(server_addr)).then([this] (connected_socket fd) {
- _sockets.push_back(std::move(fd));
- http_debug("Established connection %6d on cpu %3d\n", _conn_connected.current(), engine().cpu_id());
- _conn_connected.signal();
- }).or_terminate();
- }
- return _conn_connected.wait(_conn_per_core);
- }
-
- future<> run() {
- // All connected, start HTTP request
- http_debug("Established all %6d tcp connections on cpu %3d\n", _conn_per_core, engine().cpu_id());
- if (_timer_based) {
- _run_timer.arm(std::chrono::seconds(_duration));
- }
- for (auto&& fd : _sockets) {
- auto conn = new connection(std::move(fd), this);
- conn->do_req().then_wrapped([this, conn] (auto&& f) {
- http_debug("Finished connection %6d on cpu %3d\n", _conn_finished.current(), engine().cpu_id());
- _total_reqs += conn->nr_done();
- _conn_finished.signal();
- delete conn;
- try {
- f.get();
- } catch (std::exception& ex) {
- print("http request error: %s\n", ex.what());
- }
- });
- }
-
- // All finished
- return _conn_finished.wait(_conn_per_core);
- }
- future<> stop() {
- return make_ready_future();
- }
-};
-
-namespace bpo = boost::program_options;
-
-int main(int ac, char** av) {
- app_template app;
- app.add_options()
- ("server,s", bpo::value()->default_value("192.168.66.100:10000"), "Server address")
- ("conn,c", bpo::value()->default_value(100), "total connections")
- ("reqs,r", bpo::value()->default_value(0), "reqs per connection")
- ("duration,d", bpo::value()->default_value(10), "duration of the test in seconds)");
-
- return app.run(ac, av, [&app] {
- auto& config = app.configuration();
- auto server = config["server"].as();
- auto reqs_per_conn = config["reqs"].as();
- auto total_conn= config["conn"].as();
- auto duration = config["duration"].as();
-
- if (total_conn % smp::count != 0) {
- print("Error: conn needs to be n * cpu_nr\n");
- return engine().exit(0);
- }
-
- auto http_clients = new distributed;
-
- // Start http requests on all the cores
- auto started = std::chrono::high_resolution_clock::now();
- print("========== http_client ============\n");
- print("Server: %s\n", server);
- print("Connections: %u\n", total_conn);
- print("Requests/connection: %s\n", reqs_per_conn == 0 ? "dynamic (timer based)" : std::to_string(reqs_per_conn));
- http_clients->start(std::ref(duration), std::ref(total_conn), std::ref(reqs_per_conn)).then([http_clients, started, server] {
- return http_clients->invoke_on_all(&http_client::connect, ipv4_addr{server});
- }).then([http_clients] {
- return http_clients->invoke_on_all(&http_client::run);
- }).then([http_clients] {
- return http_clients->map_reduce(adder(), &http_client::total_reqs);
- }).then([http_clients, started] (auto total_reqs) {
- // All the http requests are finished
- auto finished = std::chrono::high_resolution_clock::now();
- auto elapsed = finished - started;
- auto secs = static_cast(elapsed.count() / 1000000000.0);
- print("Total cpus: %u\n", smp::count);
- print("Total requests: %u\n", total_reqs);
- print("Total time: %f\n", secs);
- print("Requests/sec: %f\n", static_cast(total_reqs) / secs);
- print("========== done ============\n");
- http_clients->stop().then([http_clients] {
- // FIXME: If we call engine().exit(0) here to exit when
- // requests are done. The tcp connection will not be closed
- // properly, becasue we exit too earily and the FIN packets are
- // not exchanged.
- delete http_clients;
- engine().exit(0);
- });
- });
- });
-}
diff --git a/configure.py b/configure.py
index 5e01829890..8b5a6703dc 100755
--- a/configure.py
+++ b/configure.py
@@ -1,21 +1,4 @@
#!/usr/bin/python3
-#
-# This file is open source software, licensed to you under the terms
-# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
-# distributed with this work for additional information regarding copyright
-# ownership. You may not use this file except in compliance with the License.
-#
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
import os, os.path, textwrap, argparse, sys, shlex, subprocess, tempfile, re
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:]])
@@ -195,58 +178,18 @@ urchin_tests = [
'tests/urchin/network_topology_strategy_test',
'tests/urchin/query_processor_test',
'tests/urchin/batchlog_manager_test',
-]
-
-tests = [
- 'tests/fileiotest',
- 'tests/directory_test',
- 'tests/linecount',
- 'tests/echotest',
- 'tests/l3_test',
- 'tests/ip_test',
- 'tests/timertest',
- 'tests/tcp_test',
- 'tests/futures_test',
- 'tests/foreign_ptr_test',
- 'tests/smp_test',
- 'tests/thread_test',
- 'tests/thread_context_switch',
- 'tests/udp_server',
- 'tests/udp_client',
- 'tests/blkdiscard_test',
- 'tests/sstring_test',
- 'tests/httpd',
- 'tests/memcached/test_ascii_parser',
- 'tests/tcp_server',
- 'tests/tcp_client',
- 'tests/allocator_test',
- 'tests/output_stream_test',
- 'tests/udp_zero_copy',
- 'tests/shared_ptr_test',
- 'tests/slab_test',
- 'tests/fstream_test',
- 'tests/distributed_test',
- 'tests/rpc',
- 'tests/semaphore_test',
- ]
-
-# urchin
-tests += [
'tests/urchin/bytes_ostream_test',
'tests/urchin/UUID_test',
'tests/urchin/murmur_hash_test',
]
apps = [
- 'apps/httpd/httpd',
- 'seastar',
- 'apps/seawreck/seawreck',
- 'apps/memcached/memcached',
+ 'scylla',
]
-tests += urchin_tests
+tests = urchin_tests
-all_artifacts = apps + tests + ['libseastar.a', 'seastar.pc']
+all_artifacts = apps + tests
arg_parser = argparse.ArgumentParser('Configure seastar')
arg_parser.add_argument('--static', dest = 'static', action = 'store_const', default = '',
@@ -276,121 +219,8 @@ add_tristate(arg_parser, name = 'hwloc', dest = 'hwloc', help = 'hwloc support')
add_tristate(arg_parser, name = 'xen', dest = 'xen', help = 'Xen support')
args = arg_parser.parse_args()
-libnet = [
- 'net/proxy.cc',
- 'net/virtio.cc',
- 'net/dpdk.cc',
- 'net/ip.cc',
- 'net/ethernet.cc',
- 'net/arp.cc',
- 'net/native-stack.cc',
- 'net/ip_checksum.cc',
- 'net/udp.cc',
- 'net/tcp.cc',
- 'net/dhcp.cc',
- ]
-
-core = [
- 'core/reactor.cc',
- 'core/fstream.cc',
- 'core/posix.cc',
- 'core/memory.cc',
- 'core/resource.cc',
- 'core/scollectd.cc',
- 'core/app-template.cc',
- 'core/thread.cc',
- 'core/dpdk_rte.cc',
- 'util/conversions.cc',
- 'net/packet.cc',
- 'net/posix-stack.cc',
- 'net/net.cc',
- 'rpc/rpc.cc',
- ]
-
-http = ['http/transformers.cc',
- 'http/json_path.cc',
- 'http/file_handler.cc',
- 'http/common.cc',
- 'http/routes.cc',
- 'json/json_elements.cc',
- 'json/formatter.cc',
- 'http/matcher.cc',
- 'http/mime_types.cc',
- 'http/httpd.cc',
- 'http/reply.cc',
- 'http/request_parser.rl',
- 'http/api_docs.cc',
- ]
-
-api = ['api/api.cc',
- 'api/api-doc/storage_service.json',
- 'api/storage_service.cc',
- 'api/api-doc/commitlog.json',
- 'api/commitlog.cc',
- 'api/api-doc/gossiper.json',
- 'api/gossiper.cc',
- 'api/api-doc/failure_detector.json',
- 'api/failure_detector.cc',
- 'api/api-doc/column_family.json',
- 'api/column_family.cc',
- 'api/messaging_service.cc',
- 'api/api-doc/messaging_service.json',
- 'api/api-doc/storage_proxy.json',
- 'api/storage_proxy.cc',
- 'api/api-doc/cache_service.json',
- 'api/cache_service.cc',
- 'api/api-doc/collectd.json',
- 'api/collectd.cc',
- 'api/api-doc/endpoint_snitch_info.json',
- 'api/endpoint_snitch.cc',
- 'api/api-doc/compaction_manager.json',
- 'api/compaction_manager.cc',
- 'api/api-doc/hinted_handoff.json',
- 'api/hinted_handoff.cc',
- ]
-
-boost_test_lib = [
- 'tests/test-utils.cc',
- 'tests/test_runner.cc',
-]
-
defines = []
-libs = '-laio -lboost_program_options -lboost_system -lboost_filesystem -lstdc++ -lm -lboost_unit_test_framework -lboost_thread -lcryptopp -lrt -lyaml-cpp -lboost_date_time'
-hwloc_libs = '-lhwloc -lnuma -lpciaccess -lxml2 -lz'
-urchin_libs = '-llz4 -lsnappy -lz'
-
-libs = urchin_libs + ' ' + libs
-
-xen_used = False
-def have_xen():
- source = '#include \n'
- source += '#include \n'
- source += '#include \n'
- source += '#include \n'
- source += '#include \n'
-
- return try_compile(compiler = args.cxx, source = source)
-
-if apply_tristate(args.xen, test = have_xen,
- note = 'Note: xen-devel not installed. No Xen support.',
- missing = 'Error: required package xen-devel not installed.'):
- libs += ' -lxenstore'
- defines.append("HAVE_XEN")
- libnet += [ 'net/xenfront.cc' ]
- core += [
- 'core/xen/xenstore.cc',
- 'core/xen/gntalloc.cc',
- 'core/xen/evtchn.cc',
- ]
- xen_used=True
-
-if xen_used and args.dpdk_target:
- print("Error: only xen or dpdk can be used, not both.")
- sys.exit(1)
-
-memcache_base = [
- 'apps/memcached/ascii.rl'
-] + libnet + core
+urchin_libs = '-llz4 -lsnappy -lz -lboost_thread -lcryptopp -lrt -lyaml-cpp -lboost_date_time'
cassandra_interface = Thrift(source = 'interface/cassandra.thrift', service = 'Cassandra')
@@ -532,74 +362,56 @@ urchin_core = (['database.cc',
]
+ [Antlr3Grammar('cql3/Cql.g')]
+ [Thrift('interface/cassandra.thrift', 'Cassandra')]
- + core + libnet)
+ )
-urchin_tests_dependencies = urchin_core + http + api + [
+api = ['api/api.cc',
+ 'api/api-doc/storage_service.json',
+ 'api/storage_service.cc',
+ 'api/api-doc/commitlog.json',
+ 'api/commitlog.cc',
+ 'api/api-doc/gossiper.json',
+ 'api/gossiper.cc',
+ 'api/api-doc/failure_detector.json',
+ 'api/failure_detector.cc',
+ 'api/api-doc/column_family.json',
+ 'api/column_family.cc',
+ 'api/messaging_service.cc',
+ 'api/api-doc/messaging_service.json',
+ 'api/api-doc/storage_proxy.json',
+ 'api/storage_proxy.cc',
+ 'api/api-doc/cache_service.json',
+ 'api/cache_service.cc',
+ 'api/api-doc/collectd.json',
+ 'api/collectd.cc',
+ 'api/api-doc/endpoint_snitch_info.json',
+ 'api/endpoint_snitch.cc',
+ 'api/api-doc/compaction_manager.json',
+ 'api/compaction_manager.cc',
+ 'api/api-doc/hinted_handoff.json',
+ 'api/hinted_handoff.cc',
+ ]
+
+urchin_tests_dependencies = urchin_core + [
'tests/urchin/cql_test_env.cc',
'tests/urchin/cql_assertions.cc',
'tests/urchin/result_set_assertions.cc',
]
+urchin_tests_seastar_deps = [
+ 'seastar/tests/test-utils.cc',
+ 'seastar/tests/test_runner.cc',
+]
+
deps = {
- 'libseastar.a' : core + libnet,
- 'seastar.pc': [],
- 'seastar': ['main.cc'] + http + api + urchin_core,
- 'apps/httpd/httpd': ['apps/httpd/demo.json', 'apps/httpd/main.cc'] + http + libnet + core,
- 'apps/memcached/memcached': ['apps/memcached/memcache.cc'] + memcache_base,
- 'tests/memcached/test_ascii_parser': ['tests/memcached/test_ascii_parser.cc'] + memcache_base + boost_test_lib,
- 'tests/fileiotest': ['tests/fileiotest.cc'] + core + boost_test_lib,
- 'tests/directory_test': ['tests/directory_test.cc'] + core,
- 'tests/linecount': ['tests/linecount.cc'] + core,
- 'tests/echotest': ['tests/echotest.cc'] + core + libnet,
- 'tests/l3_test': ['tests/l3_test.cc'] + core + libnet,
- 'tests/ip_test': ['tests/ip_test.cc'] + core + libnet,
- 'tests/tcp_test': ['tests/tcp_test.cc'] + core + libnet,
- 'tests/timertest': ['tests/timertest.cc'] + core,
- 'tests/futures_test': ['tests/futures_test.cc'] + core + boost_test_lib,
- 'tests/foreign_ptr_test': ['tests/foreign_ptr_test.cc'] + core + boost_test_lib,
- 'tests/semaphore_test': ['tests/semaphore_test.cc'] + core + boost_test_lib,
- 'tests/smp_test': ['tests/smp_test.cc'] + core,
- 'tests/thread_test': ['tests/thread_test.cc'] + core + boost_test_lib,
- 'tests/thread_context_switch': ['tests/thread_context_switch.cc'] + core,
- 'tests/udp_server': ['tests/udp_server.cc'] + core + libnet,
- 'tests/udp_client': ['tests/udp_client.cc'] + core + libnet,
- 'tests/tcp_server': ['tests/tcp_server.cc'] + core + libnet,
- 'tests/tcp_client': ['tests/tcp_client.cc'] + core + libnet,
- 'apps/seawreck/seawreck': ['apps/seawreck/seawreck.cc', 'apps/seawreck/http_response_parser.rl'] + core + libnet,
- 'tests/blkdiscard_test': ['tests/blkdiscard_test.cc'] + core,
- 'tests/sstring_test': ['tests/sstring_test.cc'] + core,
- 'tests/httpd': ['tests/httpd.cc'] + http + core + boost_test_lib,
- 'tests/allocator_test': ['tests/allocator_test.cc', 'core/memory.cc', 'core/posix.cc'],
- 'tests/output_stream_test': ['tests/output_stream_test.cc'] + core + libnet + boost_test_lib,
- 'tests/udp_zero_copy': ['tests/udp_zero_copy.cc'] + core + libnet,
- 'tests/shared_ptr_test': ['tests/shared_ptr_test.cc'] + core,
- 'tests/slab_test': ['tests/slab_test.cc'] + core,
- 'tests/fstream_test': ['tests/fstream_test.cc'] + core + boost_test_lib,
- 'tests/distributed_test': ['tests/distributed_test.cc'] + core,
- 'tests/rpc': ['tests/rpc.cc'] + core + libnet,
- 'tests/urchin/gossiping_property_file_snitch_test': ['tests/urchin/gossiping_property_file_snitch_test.cc'] + urchin_core,
- 'tests/urchin/network_topology_strategy_test': ['tests/urchin/network_topology_strategy_test.cc'] + urchin_core,
+ 'scylla': ['main.cc'] + urchin_core + api,
}
for t in urchin_tests:
deps[t] = urchin_tests_dependencies + [t + '.cc']
+ if 'types_test' not in t and 'keys_test' not in t and 'partitioner_test' not in t and 'map_difference_test' not in t and 'frozen_mutation_test' not in t and 'perf_mutation' not in t and 'cartesian_product_test' not in t and 'perf_hash' not in t and 'perf_cql_parser' not in t and 'message' not in t and 'perf_simple_query' not in t and 'serialization' not in t and t != 'tests/urchin/gossip' and 'compound_test' not in t:
+ deps[t] += urchin_tests_seastar_deps
-deps['tests/urchin/mutation_test'] += boost_test_lib
-deps['tests/urchin/cql_query_test'] += boost_test_lib
-deps['tests/urchin/mutation_reader_test'] += boost_test_lib
-deps['tests/urchin/mutation_query_test'] += boost_test_lib
-deps['tests/urchin/commitlog_test'] += boost_test_lib
-deps['tests/urchin/config_test'] += boost_test_lib
-deps['tests/urchin/sstable_test'] += boost_test_lib + ['tests/urchin/sstable_datafile_test.cc']
-deps['tests/urchin/sstable_mutation_test'] += boost_test_lib
-deps['tests/urchin/hash_test'] += boost_test_lib
-deps['tests/urchin/serializer_test'] += boost_test_lib
-deps['tests/urchin/gossip_test'] += boost_test_lib
-deps['tests/urchin/gossiping_property_file_snitch_test'] += boost_test_lib
-deps['tests/urchin/network_topology_strategy_test'] += boost_test_lib
-deps['tests/urchin/row_cache_test'] += boost_test_lib
-deps['tests/urchin/query_processor_test'] += boost_test_lib
-deps['tests/urchin/batchlog_manager_test'] += boost_test_lib
+deps['tests/urchin/sstable_test'] += ['tests/urchin/sstable_datafile_test.cc']
deps['tests/urchin/bytes_ostream_test'] = ['tests/urchin/bytes_ostream_test.cc']
deps['tests/urchin/UUID_test'] = ['utils/UUID_gen.cc', 'tests/urchin/UUID_test.cc']
@@ -610,55 +422,6 @@ warnings = [
'-Wno-maybe-uninitialized', # false positives on gcc 5
]
-# The "--with-osv=" parameter is a shortcut for a bunch of other
-# settings:
-if args.with_osv:
- args.so = True
- args.hwloc = False
- args.user_cflags = (args.user_cflags +
- ' -DDEFAULT_ALLOCATOR -fvisibility=default -DHAVE_OSV -I' +
- args.with_osv + ' -I' + args.with_osv + '/include -I' +
- args.with_osv + '/arch/x64')
-
-if args.dpdk:
- subprocess.check_call('make -C dpdk RTE_OUTPUT=$PWD/build/dpdk/ config T=x86_64-native-linuxapp-gcc',
- shell = True)
- # adjust configutation to taste
- dotconfig = 'build/dpdk/.config'
- lines = open(dotconfig).readlines()
- def update(lines, vars):
- ret = []
- for line in lines:
- for var, val in vars.items():
- if line.startswith(var + '='):
- line = var + '=' + val + '\n'
- ret.append(line)
- return ret
- lines = update(lines, {'CONFIG_RTE_LIBRTE_PMD_BOND': 'n',
- 'CONFIG_RTE_MBUF_SCATTER_GATHER': 'n',
- 'CONFIG_RTE_LIBRTE_IP_FRAG': 'n',
- 'CONFIG_RTE_APP_TEST': 'n',
- 'CONFIG_RTE_TEST_PMD': 'n',
- 'CONFIG_RTE_MBUF_REFCNT_ATOMIC': 'n',
- 'CONFIG_RTE_MAX_MEMSEG': '8192',
- })
- open(dotconfig, 'w').writelines(lines)
- args.dpdk_target = 'build/dpdk'
-
-if args.dpdk_target:
- args.user_cflags = (args.user_cflags +
- ' -DHAVE_DPDK -I' + args.dpdk_target + '/include ' +
- dpdk_cflags(args.dpdk_target) +
- ' -Wno-error=literal-suffix -Wno-literal-suffix -Wno-invalid-offsetof')
- libs += (' -L' + args.dpdk_target + '/lib ')
- if args.with_osv:
- libs += '-lintel_dpdk -lrt -lm -ldl'
- else:
- libs += '-Wl,--whole-archive -lrte_pmd_vmxnet3_uio -lrte_pmd_i40e -lrte_pmd_ixgbe -lrte_pmd_e1000 -lrte_pmd_ring -Wl,--no-whole-archive -lrte_distributor -lrte_pipeline -lrte_table -lrte_port -lrte_timer -lrte_hash -lrte_lpm -lrte_power -lrte_acl -lrte_meter -lrte_sched -lrte_kvargs -lrte_mbuf -lethdev -lrte_eal -lrte_malloc -lrte_mempool -lrte_ring -lrte_cmdline -lrte_cfgfile -lrt -lm -ldl'
-
-args.user_cflags += " " + pkg_config("--cflags", "jsoncpp")
-libs += " " + pkg_config("--libs", "jsoncpp")
-
warnings = [w
for w in warnings
if warning_supported(warning = w, compiler = args.cxx)]
@@ -667,16 +430,6 @@ warnings = ' '.join(warnings)
dbgflag = debug_flag(args.cxx) if args.debuginfo else ''
-def have_hwloc():
- return try_compile(compiler = args.cxx, source = '#include \n#include ')
-
-if apply_tristate(args.hwloc, test = have_hwloc,
- note = 'Note: hwloc-devel/numactl-devel not installed. No NUMA support.',
- missing = 'Error: required packages hwloc-devel/numactl-devel not installed.'):
- libs += ' ' + hwloc_libs
- defines.append('HAVE_HWLOC')
- defines.append('HAVE_NUMA')
-
if args.so:
args.pie = '-shared'
args.fpie = '-fpic'
@@ -697,13 +450,43 @@ link_pool_depth = max(int(total_memory / 7e9), 1)
build_modes = modes if args.mode == 'all' else [args.mode]
build_artifacts = all_artifacts if not args.artifacts else args.artifacts
-dpdk_sources = []
-if args.dpdk:
- for root, dirs, files in os.walk('dpdk'):
- dpdk_sources += [os.path.join(root, file)
- for file in files
- if file.endswith('.h') or file.endswith('.c')]
-dpdk_sources = ' '.join(dpdk_sources)
+status = subprocess.call('./configure.py', cwd = 'seastar')
+
+if status != 0:
+ print('Seastar configuration failed')
+ sys.exit(1)
+
+
+pc = { mode : 'build/{}/seastar.pc'.format(mode) for mode in build_modes }
+for ninja in ['ninja', 'ninja-build', 'true']:
+ try:
+ status = subprocess.call([ninja] + list(pc.values()), cwd = 'seastar')
+ if status == 0:
+ break
+ except OSError as e:
+ pass
+if ninja == 'true':
+ print('Unable to create {}'.format(pc))
+ sys.exit(1)
+
+for mode in build_modes:
+ cfg = dict([line.strip().split(': ', 1)
+ for line in open('seastar/' + pc[mode])
+ if ': ' in line])
+ modes[mode]['seastar_cflags'] = cfg['Cflags']
+ modes[mode]['seastar_libs'] = cfg['Libs']
+
+def gen_seastar_deps():
+ for root, dir, files in os.walk('seastar'):
+ for f in files:
+ if f.endswith('.cc') or f.endswith('.hh'):
+ yield root + '/' + f
+
+seastar_deps = ' '.join(gen_seastar_deps())
+
+args.user_cflags += " " + pkg_config("--cflags", "jsoncpp")
+libs = "-lyaml-cpp -llz4 -lz -lsnappy " + pkg_config("--libs", "jsoncpp") + ' -lboost_filesystem'
+user_cflags = args.user_cflags
outdir = 'build'
buildfile = 'build.ninja'
@@ -720,9 +503,8 @@ with open(buildfile, 'w') as f:
configure_args = {configure_args}
builddir = {outdir}
cxx = {cxx}
- # we disable _FORTIFY_SOURCE because it generates false positives with longjmp() (core/thread.cc)
- cxxflags = -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I. -U_FORTIFY_SOURCE {user_cflags} {warnings} {defines}
- ldflags = {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags}
+ cxxflags = {user_cflags} {warnings} {defines}
+ ldflags = {user_ldflags}
libs = {libs}
pool link_pool
depth = {link_pool_depth}
@@ -733,8 +515,11 @@ with open(buildfile, 'w') as f:
command = echo -e $text > $out
description = GEN $out
rule swagger
- command = json/json2code.py -f $in -o $out
+ command = seastar/json/json2code.py -f $in -o $out
description = SWAGGER $out
+ rule ninja
+ command = {ninja} -C $subdir $target
+ description = NINJA $out
''').format(**globals()))
if args.dpdk:
f.write(textwrap.dedent('''\
@@ -744,19 +529,14 @@ with open(buildfile, 'w') as f:
''').format(**globals()))
for mode in build_modes:
modeval = modes[mode]
- if modeval['sanitize'] and not do_sanitize:
- print('Note: --static disables debug mode sanitizers')
- modeval['sanitize'] = ''
- modeval['sanitize_libs'] = ''
f.write(textwrap.dedent('''\
- cxxflags_{mode} = {sanitize} {opt} -I $builddir/{mode}/gen
- libs_{mode} = {libs} {sanitize_libs}
+ cxxflags_{mode} = -I. -I $builddir/{mode}/gen -I seastar
rule cxx.{mode}
- command = $cxx -MMD -MT $out -MF $out.d $cxxflags $cxxflags_{mode} -c -o $out $in
+ command = $cxx -MMD -MT $out -MF $out.d {seastar_cflags} $cxxflags $cxxflags_{mode} -c -o $out $in
description = CXX $out
depfile = $out.d
rule link.{mode}
- command = $cxx $cxxflags_{mode} $ldflags -o $out $in $libs $libs_{mode}
+ command = $cxx $cxxflags_{mode} $ldflags {seastar_libs} -o $out $in $libs $libs_{mode}
description = LINK $out
pool = link_pool
rule link_stripped.{mode}
@@ -807,18 +587,8 @@ with open(buildfile, 'w') as f:
elif binary.endswith('.a'):
f.write('build $builddir/{}/{}: ar.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
else:
- if binary.startswith('tests/'):
- # Our code's debugging information is huge, and multiplied
- # by many tests yields ridiculous amounts of disk space.
- # So we strip the tests by default; The user can very
- # quickly re-link the test unstripped by adding a "_g"
- # to the test name, e.g., "ninja build/release/testname_g"
- f.write('build $builddir/{}/{}: link_stripped.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
- if has_thrift:
- f.write(' libs = -lthrift -lboost_system $libs\n')
- f.write('build $builddir/{}/{}_g: link.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
- else:
- f.write('build $builddir/{}/{}: link.{} {} | {}\n'.format(mode, binary, mode, str.join(' ', objs), dpdk_deps))
+ f.write('build $builddir/{}/{}: link.{} {} {}\n'.format(mode, binary, mode, str.join(' ', objs),
+ 'seastar/build/{}/libseastar.a'.format(mode)))
if has_thrift:
f.write(' libs = -lthrift -lboost_system $libs\n')
for src in srcs:
@@ -865,6 +635,9 @@ with open(buildfile, 'w') as f:
for cc in grammar.sources('$builddir/{}/gen'.format(mode)):
obj = cc.replace('.cpp', '.o')
f.write('build {}: cxx.{} {}\n'.format(obj, mode, cc))
+ f.write('build seastar/build/{}/libseastar.a: ninja {}\n'.format(mode, seastar_deps))
+ f.write(' subdir = seastar\n')
+ f.write(' target = build/{}/libseastar.a\n'.format(mode))
f.write(textwrap.dedent('''\
rule configure
command = python3 configure.py $configure_args
diff --git a/core/align.hh b/core/align.hh
deleted file mode 100644
index e53c5dd91e..0000000000
--- a/core/align.hh
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-
-#ifndef ALIGN_HH_
-#define ALIGN_HH_
-
-#include
-#include
-
-template
-inline constexpr
-T align_up(T v, T align) {
- return (v + align - 1) & ~(align - 1);
-}
-
-template
-inline constexpr
-T* align_up(T* v, size_t align) {
- static_assert(sizeof(T) == 1, "align byte pointers only");
- return reinterpret_cast(align_up(reinterpret_cast(v), align));
-}
-
-template
-inline constexpr
-T align_down(T v, T align) {
- return v & ~(align - 1);
-}
-
-template
-inline constexpr
-T* align_down(T* v, size_t align) {
- static_assert(sizeof(T) == 1, "align byte pointers only");
- return reinterpret_cast(align_down(reinterpret_cast(v), align));
-}
-
-#endif /* ALIGN_HH_ */
diff --git a/core/app-template.cc b/core/app-template.cc
deleted file mode 100644
index f40a04d2a9..0000000000
--- a/core/app-template.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-
-#include "app-template.hh"
-#include "core/reactor.hh"
-#include "core/scollectd.hh"
-#include "core/print.hh"
-#include
-#include
-#include
-#include
-
-namespace bpo = boost::program_options;
-
-app_template::app_template()
- : _opts("App options") {
- _opts.add_options()
- ("help,h", "show help message")
- ;
- _opts.add(reactor::get_options_description());
- _opts.add(smp::get_options_description());
- _opts.add(scollectd::get_options_description());
-}
-
-boost::program_options::options_description_easy_init
-app_template::add_options() {
- return _opts.add_options();
-}
-
-void
-app_template::add_positional_options(std::initializer_list options) {
- for (auto&& o : options) {
- _opts.add(boost::make_shared(o.name, o.value_semantic, o.help));
- _pos_opts.add(o.name, o.max_count);
- }
-}
-
-
-bpo::variables_map&
-app_template::configuration() {
- return *_configuration;
-}
-
-int
-app_template::run(int ac, char ** av, std::function&& func) {
-#ifdef DEBUG
- print("WARNING: debug mode. Not for benchmarking or production\n");
-#endif
- bpo::variables_map configuration;
- try {
- bpo::store(bpo::command_line_parser(ac, av)
- .options(_opts)
- .positional(_pos_opts)
- .run()
- , configuration);
- auto home = std::getenv("HOME");
- if (home) {
- std::ifstream ifs(std::string(home) + "/.config/seastar/seastar.conf");
- if (ifs) {
- bpo::store(bpo::parse_config_file(ifs, _opts), configuration);
- }
- }
- } catch (bpo::error& e) {
- print("error: %s\n\nTry --help.\n", e.what());
- return 2;
- }
- bpo::notify(configuration);
- if (configuration.count("help")) {
- std::cout << _opts << "\n";
- return 1;
- }
- smp::configure(configuration);
- _configuration = {std::move(configuration)};
- engine().when_started().then([this] {
- scollectd::configure( this->configuration());
- }).then(
- std::move(func)
- ).then_wrapped([] (auto&& f) {
- try {
- f.get();
- } catch (std::exception& ex) {
- std::cout << "program failed with uncaught exception: " << ex.what() << "\n";
- engine().exit(1);
- }
- });
- auto exit_code = engine().run();
- smp::cleanup();
- return exit_code;
-}
diff --git a/core/app-template.hh b/core/app-template.hh
deleted file mode 100644
index bd357a3ee5..0000000000
--- a/core/app-template.hh
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-#ifndef _APP_TEMPLATE_HH
-#define _APP_TEMPLATE_HH
-
-#include
-#include
-#include
-
-class app_template {
-private:
- boost::program_options::options_description _opts;
- boost::program_options::positional_options_description _pos_opts;
- boost::optional _configuration;
-public:
- struct positional_option {
- const char* name;
- const boost::program_options::value_semantic* value_semantic;
- const char* help;
- int max_count;
- };
-public:
- app_template();
- boost::program_options::options_description_easy_init add_options();
- void add_positional_options(std::initializer_list options);
- boost::program_options::variables_map& configuration();
- int run(int ac, char ** av, std::function&& func);
-};
-
-#endif
diff --git a/core/apply.hh b/core/apply.hh
deleted file mode 100644
index c015274fde..0000000000
--- a/core/apply.hh
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-
-#ifndef APPLY_HH_
-#define APPLY_HH_
-
-#include
-#include
-
-template
-struct apply_helper;
-
-template
-struct apply_helper> {
- static auto apply(Func&& func, Tuple args) {
- return func(std::get(std::forward(args))...);
- }
-};
-
-template
-inline
-auto apply(Func&& func, std::tuple&& args) {
- using helper = apply_helper&&, std::index_sequence_for>;
- return helper::apply(std::forward(func), std::move(args));
-}
-
-template
-inline
-auto apply(Func&& func, std::tuple& args) {
- using helper = apply_helper&, std::index_sequence_for>;
- return helper::apply(std::forward(func), args);
-}
-
-template
-inline
-auto apply(Func&& func, const std::tuple& args) {
- using helper = apply_helper&, std::index_sequence_for>;
- return helper::apply(std::forward(func), args);
-}
-
-#endif /* APPLY_HH_ */
diff --git a/core/array_map.hh b/core/array_map.hh
deleted file mode 100644
index 9e0a1f60d1..0000000000
--- a/core/array_map.hh
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-
-#ifndef ARRAY_MAP_HH_
-#define ARRAY_MAP_HH_
-
-#include
-
-// unordered_map implemented as a simple array
-
-template
-class array_map {
- std::array _a {};
-public:
- array_map(std::initializer_list> i) {
- for (auto kv : i) {
- _a[kv.first] = kv.second;
- }
- }
- Value& operator[](size_t key) { return _a[key]; }
- const Value& operator[](size_t key) const { return _a[key]; }
-
- Value& at(size_t key) {
- if (key >= Max) {
- throw std::out_of_range(std::to_string(key) + " >= " + std::to_string(Max));
- }
- return _a[key];
- }
-};
-
-
-
-#endif /* ARRAY_MAP_HH_ */
diff --git a/core/bitops.hh b/core/bitops.hh
deleted file mode 100644
index cece53676a..0000000000
--- a/core/bitops.hh
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * This file is open source software, licensed to you under the terms
- * of the Apache License, Version 2.0 (the "License"). See the NOTICE file
- * distributed with this work for additional information regarding copyright
- * ownership. You may not use this file except in compliance with the License.
- *
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-
-#ifndef BITOPS_HH_
-#define BITOPS_HH_
-
-inline
-constexpr unsigned count_leading_zeros(unsigned x) {
- return __builtin_clz(x);
-}
-
-inline
-constexpr unsigned count_leading_zeros(unsigned long x) {
- return __builtin_clzl(x);
-}
-
-inline
-constexpr unsigned count_leading_zeros(unsigned long long x) {
- return __builtin_clzll(x);
-}
-
-inline
-constexpr unsigned count_trailing_zeros(unsigned x) {
- return __builtin_ctz(x);
-}
-
-inline
-constexpr unsigned count_trailing_zeros(unsigned long x) {
- return __builtin_ctzl(x);
-}
-
-inline
-constexpr unsigned count_trailing_zeros(unsigned long long x) {
- return __builtin_ctzll(x);
-}
-
-#endif /* BITOPS_HH_ */
diff --git a/core/bitset-iter.hh b/core/bitset-iter.hh
deleted file mode 100644
index 5a1cf09ae6..0000000000
--- a/core/bitset-iter.hh
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- */
-
-/*
- * Imported from OSv:
- *
- * Copyright (C) 2014 Cloudius Systems, Ltd.
- *
- * This work is open source software, licensed under the terms of the
- * BSD license as described in the LICENSE file in the top-level directory.
- */
-
-#ifndef __OSV_BITSET_ITER
-#define __OSV_BITSET_ITER
-
-#include
-#include
-
-namespace bitsets {
-
-static constexpr int ulong_bits = std::numeric_limits::digits;
-
-/**
- * Returns the number of leading zeros in value's binary representation.
- *
- * If value == 0 the result is undefied. If T is signed and value is negative
- * the result is undefined.
- *
- * The highest value that can be returned is std::numeric_limits::digits - 1,
- * which is returned when value == 1.
- */
-template
-inline size_t count_leading_zeros(T value);
-
-/**
- * Returns the number of trailing zeros in value's binary representation.
- *
- * If value == 0 the result is undefied. If T is signed and value is negative
- * the result is undefined.
- *
- * The highest value that can be returned is std::numeric_limits::digits - 1.
- */
-template
-static inline size_t count_trailing_zeros(T value);
-
-template<>
-inline size_t count_leading_zeros(unsigned long value)
-{
- return __builtin_clzl(value);
-}
-
-template<>
-inline size_t count_leading_zeros(long value)
-{
- return __builtin_clzl((unsigned long)value) - 1;
-}
-
-template<>
-inline
-size_t count_trailing_zeros(unsigned long value)
-{
- return __builtin_ctzl(value);
-}
-
-template<>
-inline
-size_t count_trailing_zeros