Compare commits

..

1 Commits

Author SHA1 Message Date
Pekka Enberg
737a9019a4 dist/docker: Add missing "hostname" package
The Fedora base image has changed so we need to add "hostname" that's
used by the Docker-specific launch script to our image.

Fixes Scylla startup.

Signed-off-by: Pekka Enberg <penberg@scylladb.com>
2015-10-15 13:44:38 +03:00
480 changed files with 9049 additions and 33550 deletions

4
.gitignore vendored
View File

@@ -4,7 +4,3 @@
build
build.ninja
cscope.*
/debian/
dist/ami/files/*.rpm
dist/ami/variables.json
dist/ami/scylla_deploy.sh

76
ORIGIN
View File

@@ -1,77 +1 @@
http://git-wip-us.apache.org/repos/asf/cassandra.git trunk (bf599fb5b062cbcc652da78b7d699e7a01b949ad)
import = bf599fb5b062cbcc652da78b7d699e7a01b949ad
Y = Already in scylla
$ git log --oneline import..cassandra-2.1.11 -- gms/
Y 484e645 Mark node as dead even if already left
d0c166f Add trampled commit back
ba5837e Merge branch 'cassandra-2.0' into cassandra-2.1
718e47f Forgot a damn c/r
a7282e4 Merge branch 'cassandra-2.0' into cassandra-2.1
Y ae4cd69 Print versions for gossip states in gossipinfo.
Y 7fba3d2 Don't mark nodes down before the max local pause interval once paused.
c2142e6 Merge branch 'cassandra-2.0' into cassandra-2.1
ba9a69e checkForEndpointCollision fails for legitimate collisions, finalized list of statuses and nits, CASSANDRA-9765
54470a2 checkForEndpointCollision fails for legitimate collisions, improved version after CR, CASSANDRA-9765
2c9b490 checkForEndpointCollision fails for legitimate collisions, CASSANDRA-9765
4c15970 Merge branch 'cassandra-2.0' into cassandra-2.1
ad8047a ArrivalWindow should use primitives
Y 4012134 Failure detector detects and ignores local pauses
9bcdd0f Merge branch 'cassandra-2.0' into cassandra-2.1
cefaa4e Close incoming connections when MessagingService is stopped
ea1beda Merge branch 'cassandra-2.0' into cassandra-2.1
08dbbd6 Ignore gossip SYNs after shutdown
3c17ac6 Merge branch 'cassandra-2.0' into cassandra-2.1
a64bc43 lists work better when you initialize them
543a899 change list to arraylist
730d4d4 Merge branch 'cassandra-2.0' into cassandra-2.1
e3e2de0 change list to arraylist
f7884c5 Merge branch 'cassandra-2.0' into cassandra-2.1
Y 84b2846 remove redundant state
4f2c372 Merge branch 'cassandra-2.0' into cassandra-2.1
Y b2c62bb Add shutdown gossip state to prevent timeouts during rolling restarts
Y def4835 Add missing follow on fix for 7816 only applied to cassandra-2.1 branch in 763130bdbde2f4cec2e8973bcd5203caf51cc89f
Y 763130b Followup commit for 7816
1376b8e Merge branch 'cassandra-2.0' into cassandra-2.1
Y 2199a87 Fix duplicate up/down messages sent to native clients
136042e Merge branch 'cassandra-2.0' into cassandra-2.1
Y eb9c5bb Improve FD logging when the arrival time is ignored.
$ git log --oneline import..cassandra-2.1.11 -- service/StorageService.java
92c5787 Keep StorageServiceMBean interface stable
6039d0e Fix DC and Rack in nodetool info
a2f0da0 Merge branch 'cassandra-2.0' into cassandra-2.1
c4de752 Follow-up to CASSANDRA-10238
e889ee4 2i key cache load fails
4b1d59e Merge branch 'cassandra-2.0' into cassandra-2.1
257cdaa Fix consolidating racks violating the RF contract
Y 27754c0 refuse to decomission if not in state NORMAL patch by Jan Karlsson and Stefania for CASSANDRA-8741
Y 5bc56c3 refuse to decomission if not in state NORMAL patch by Jan Karlsson and Stefania for CASSANDRA-8741
Y 8f9ca07 Cannot replace token does not exist - DN node removed as Fat Client
c2142e6 Merge branch 'cassandra-2.0' into cassandra-2.1
54470a2 checkForEndpointCollision fails for legitimate collisions, improved version after CR, CASSANDRA-9765
1eccced Handle corrupt files on startup
2c9b490 checkForEndpointCollision fails for legitimate collisions, CASSANDRA-9765
c4b5260 Merge branch 'cassandra-2.0' into cassandra-2.1
Y 52dbc3f Can't transition from write survey to normal mode
9966419 Make rebuild only run one at a time
d693ca1 Merge branch 'cassandra-2.0' into cassandra-2.1
be9eff5 Add option to not validate atoms during scrub
2a4daaf followup fix for 8564
93478ab Wait for anticompaction to finish
9e9846e Fix for harmless exceptions being logged as ERROR
6d06f32 Fix anticompaction blocking ANTI_ENTROPY stage
4f2c372 Merge branch 'cassandra-2.0' into cassandra-2.1
Y b2c62bb Add shutdown gossip state to prevent timeouts during rolling restarts
Y cba1b68 Fix failed bootstrap/replace attempts being persisted in system.peers
f59df28 Allow takeColumnFamilySnapshot to take a list of tables patch by Sachin Jarin; reviewed by Nick Bailey for CASSANDRA-8348
Y ac46747 Fix failed bootstrap/replace attempts being persisted in system.peers
5abab57 Merge branch 'cassandra-2.0' into cassandra-2.1
0ff9c3c Allow reusing snapshot tags across different column families.
f9c57a5 Merge branch 'cassandra-2.0' into cassandra-2.1
Y b296c55 Fix MOVED_NODE client event
bbb3fc7 Merge branch 'cassandra-2.0' into cassandra-2.1
37eb2a0 Fix NPE in nodetool getendpoints with bad ks/cf
f8b43d4 Merge branch 'cassandra-2.0' into cassandra-2.1
e20810c Remove C* specific class from JMX API

View File

@@ -11,35 +11,11 @@ git submodule init
git submodule update --recursive
```
### Building and Running Scylla on Fedora
* Installing required packages:
### Building Scylla on Fedora
Installing required packages:
```
sudo yum install yaml-cpp-devel lz4-devel zlib-devel snappy-devel jsoncpp-devel thrift-devel antlr3-tool antlr3-C++-devel libasan libubsan gcc-c++ gnutls-devel ninja-build ragel libaio-devel cryptopp-devel xfsprogs-devel
```
* Build Scylla
```
./configure.py --mode=release --with=scylla --disable-xen
ninja-build build/release/scylla -j2 # you can use more cpus if you have tons of RAM
```
* Run Scylla
```
./build/release/scylla
```
* run Scylla with one CPU and ./tmp as data directory
```
./build/release/scylla --datadir tmp --commitlog-directory tmp --smp 1
```
* For more run options:
```
./build/release/scylla --help
sudo yum install yaml-cpp-devel lz4-devel zlib-devel snappy-devel jsoncpp-devel thrift-devel antlr3-tool antlr3-C++-devel libasan libubsan
```
## Building Fedora RPM
@@ -80,17 +56,5 @@ docker build -t <image-name> .
Run the image with:
```
docker run -p $(hostname -i):9042:9042 -i -t <image name>
docker run -i -t <image name>
```
## Contributing to Scylla
Do not send pull requests.
Send patches to the mailing list address scylladb-dev@googlegroups.com.
Be sure to subscribe.
In order for your patches to be merged, you must sign the Contributor's
License Agreement, protecting your rights and ours. See
http://www.scylladb.com/opensource/cla/.

View File

@@ -1,6 +1,6 @@
#!/bin/sh
VERSION=0.16
VERSION=0.10
if test -f version
then

View File

@@ -579,6 +579,30 @@
}
]
},
{
"path":"/column_family/sstables/snapshots_size/{name}",
"operations":[
{
"method":"GET",
"summary":"the size of SSTables in the 'snapshots' subdirectory that are no longer live",
"type":"double",
"nickname":"true_snapshots_size",
"produces":[
"application/json"
],
"parameters":[
{
"name":"name",
"description":"The column family name in keyspace:name format",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
}
]
}
]
},
{
"path":"/column_family/metrics/memtable_columns_count/{name}",
"operations":[
@@ -2017,7 +2041,7 @@
]
},
{
"path":"/column_family/metrics/snapshots_size/{name}",
"path":"/column_family/metrics/true_snapshots_size/{name}",
"operations":[
{
"method":"GET",

View File

@@ -15,7 +15,7 @@
"summary":"get List of running compactions",
"type":"array",
"items":{
"type":"summary"
"type":"jsonmap"
},
"nickname":"get_compactions",
"produces":[
@@ -27,35 +27,16 @@
]
},
{
"path":"/compaction_manager/compaction_history",
"path":"/compaction_manager/compaction_summary",
"operations":[
{
"method":"GET",
"summary":"get List of the compaction history",
"summary":"get compaction summary",
"type":"array",
"items":{
"type":"history"
"type":"string"
},
"nickname":"get_compaction_history",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/compaction_manager/compaction_info",
"operations":[
{
"method":"GET",
"summary":"get a list of all active compaction info",
"type":"array",
"items":{
"type":"compaction_info"
},
"nickname":"get_compaction_info",
"nickname":"get_compaction_summary",
"produces":[
"application/json"
],
@@ -174,112 +155,32 @@
}
],
"models":{
"row_merged":{
"id":"row_merged",
"description":"A row merged information",
"mapper":{
"id":"mapper",
"description":"A key value mapping",
"properties":{
"key":{
"type":"int",
"description":"The number of sstable"
"type":"string",
"description":"The key"
},
"value":{
"type":"long",
"description":"The number or row compacted"
"type":"string",
"description":"The value"
}
}
},
"compaction_info" :{
"id": "compaction_info",
"description":"A key value mapping",
"properties":{
"operation_type":{
"type":"string",
"description":"The operation type"
},
"completed":{
"type":"long",
"description":"The current completed"
},
"total":{
"type":"long",
"description":"The total to compact"
},
"unit":{
"type":"string",
"description":"The compacted unit"
}
}
},
"summary":{
"id":"summary",
"description":"A compaction summary object",
"jsonmap":{
"id":"jsonmap",
"description":"A json representation of a map as a list of key value",
"properties":{
"id":{
"type":"string",
"description":"The UUID"
},
"ks":{
"type":"string",
"description":"The keyspace name"
},
"cf":{
"type":"string",
"description":"The column family name"
},
"completed":{
"type":"long",
"description":"The number of units completed"
},
"total":{
"type":"long",
"description":"The total number of units"
},
"task_type":{
"type":"string",
"description":"The task compaction type"
},
"unit":{
"type":"string",
"description":"The units being used"
}
}
},
"history": {
"id":"history",
"description":"Compaction history information",
"properties":{
"id":{
"type":"string",
"description":"The UUID"
},
"cf":{
"type":"string",
"description":"The column family name"
},
"ks":{
"type":"string",
"description":"The keyspace name"
},
"compacted_at":{
"type":"long",
"description":"The time of compaction"
},
"bytes_in":{
"type":"long",
"description":"Bytes in"
},
"bytes_out":{
"type":"long",
"description":"Bytes out"
},
"rows_merged":{
"value":{
"type":"array",
"items":{
"type":"row_merged"
"type":"mapper"
},
"description":"The merged rows"
"description":"A list of key, value mapping"
}
}
}
}
}
}
}

View File

@@ -48,10 +48,7 @@
{
"method":"GET",
"summary":"Get all endpoint states",
"type":"array",
"items":{
"type":"endpoint_state"
},
"type":"string",
"nickname":"get_all_endpoint_states",
"produces":[
"application/json"
@@ -151,53 +148,6 @@
"description": "The value"
}
}
},
"endpoint_state": {
"id": "states",
"description": "Holds an endpoint state",
"properties": {
"addrs": {
"type": "string",
"description": "The endpoint address"
},
"generation": {
"type": "int",
"description": "The heart beat generation"
},
"version": {
"type": "int",
"description": "The heart beat version"
},
"update_time": {
"type": "long",
"description": "The update timestamp"
},
"is_alive": {
"type": "boolean",
"description": "Is the endpoint alive"
},
"application_state" : {
"type":"array",
"items":{
"type":"version_value"
},
"description": "Is the endpoint alive"
}
}
},
"version_value": {
"id": "version_value",
"description": "Holds a version value for an application state",
"properties": {
"application_state": {
"type": "int",
"description": "The application state enum index"
},
"value": {
"type": "string",
"description": "The version value"
}
}
}
}
}

View File

@@ -8,16 +8,13 @@
],
"apis":[
{
"path":"/messaging_service/messages/timeout",
"path":"/messaging_service/totaltimeouts",
"operations":[
{
"method":"GET",
"summary":"Get the number of timeout messages",
"type":"array",
"items":{
"type":"message_counter"
},
"nickname":"get_timeout_messages",
"summary":"Total number of timeouts happened on this node",
"type":"long",
"nickname":"get_totaltimeouts",
"produces":[
"application/json"
],
@@ -28,7 +25,7 @@
]
},
{
"path":"/messaging_service/messages/dropped_by_ver",
"path":"/messaging_service/messages/dropped",
"operations":[
{
"method":"GET",
@@ -37,25 +34,6 @@
"items":{
"type":"verb_counter"
},
"nickname":"get_dropped_messages_by_ver",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/messaging_service/messages/dropped",
"operations":[
{
"method":"GET",
"summary":"Get the number of messages that were dropped before sending",
"type":"array",
"items":{
"type":"message_counter"
},
"nickname":"get_dropped_messages",
"produces":[
"application/json"
@@ -165,49 +143,6 @@
]
}
]
},
{
"path":"/messaging_service/messages/respond_completed",
"operations":[
{
"method":"GET",
"summary":"Get the number of completed respond messages",
"type":"array",
"items":{
"type":"message_counter"
},
"nickname":"get_respond_completed_messages",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/messaging_service/version",
"operations":[
{
"method":"GET",
"summary":"Get the version number",
"type":"int",
"nickname":"get_version",
"produces":[
"application/json"
],
"parameters":[
{
"name":"addr",
"description":"Address",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"query"
}
]
}
]
}
],
"models":{
@@ -215,10 +150,10 @@
"id":"message_counter",
"description":"Holds command counters",
"properties":{
"value":{
"count":{
"type":"long"
},
"key":{
"ip":{
"type":"string"
}
}
@@ -233,28 +168,46 @@
"verb":{
"type":"string",
"enum":[
"CLIENT_ID",
"ECHO",
"MUTATION",
"MUTATION_DONE",
"READ_DATA",
"READ_MUTATION_DATA",
"READ_DIGEST",
"GOSSIP_DIGEST_SYN",
"GOSSIP_DIGEST_ACK2",
"GOSSIP_SHUTDOWN",
"DEFINITIONS_UPDATE",
"TRUNCATE",
"REPLICATION_FINISHED",
"MIGRATION_REQUEST",
"STREAM_INIT_MESSAGE",
"PREPARE_MESSAGE",
"PREPARE_DONE_MESSAGE",
"STREAM_MUTATION",
"STREAM_MUTATION_DONE",
"COMPLETE_MESSAGE",
"REPAIR_CHECKSUM_RANGE",
"GET_SCHEMA_VERSION"
"MUTATION",
"BINARY",
"READ_REPAIR",
"READ",
"REQUEST_RESPONSE",
"STREAM_INITIATE",
"STREAM_INITIATE_DONE",
"STREAM_REPLY",
"STREAM_REQUEST",
"RANGE_SLICE",
"BOOTSTRAP_TOKEN",
"TREE_REQUEST",
"TREE_RESPONSE",
"JOIN",
"GOSSIP_DIGEST_SYN",
"GOSSIP_DIGEST_ACK",
"GOSSIP_DIGEST_ACK2",
"DEFINITIONS_ANNOUNCE",
"DEFINITIONS_UPDATE",
"TRUNCATE",
"SCHEMA_CHECK",
"INDEX_SCAN",
"REPLICATION_FINISHED",
"INTERNAL_RESPONSE",
"COUNTER_MUTATION",
"STREAMING_REPAIR_REQUEST",
"STREAMING_REPAIR_RESPONSE",
"SNAPSHOT",
"MIGRATION_REQUEST",
"GOSSIP_SHUTDOWN",
"_TRACE",
"ECHO",
"REPAIR_MESSAGE",
"PAXOS_PREPARE",
"PAXOS_PROPOSE",
"PAXOS_COMMIT",
"PAGED_RANGE",
"UNUSED_1",
"UNUSED_2",
"UNUSED_3"
]
}
}

View File

@@ -193,8 +193,8 @@
"operations":[
{
"method":"GET",
"summary":"Get the RPC timeout in seconds",
"type":"double",
"summary":"Get the RPC timeout",
"type":"long",
"nickname":"get_rpc_timeout",
"produces":[
"application/json"
@@ -214,10 +214,10 @@
"parameters":[
{
"name":"timeout",
"description":"Timeout in seconds",
"description":"Timeout in millis",
"required":true,
"allowMultiple":false,
"type":"double",
"type":"long",
"paramType":"query"
}
]
@@ -229,8 +229,8 @@
"operations":[
{
"method":"GET",
"summary":"Get the read RPC timeout in seconds",
"type":"double",
"summary":"Get the read RPC timeout",
"type":"long",
"nickname":"get_read_rpc_timeout",
"produces":[
"application/json"
@@ -250,10 +250,10 @@
"parameters":[
{
"name":"timeout",
"description":"The timeout in second",
"description":"timeout_in_millis",
"required":true,
"allowMultiple":false,
"type":"double",
"type":"long",
"paramType":"query"
}
]
@@ -265,8 +265,8 @@
"operations":[
{
"method":"GET",
"summary":"Get the write RPC timeout in seconds",
"type":"double",
"summary":"Get the write RPC timeout",
"type":"long",
"nickname":"get_write_rpc_timeout",
"produces":[
"application/json"
@@ -286,10 +286,10 @@
"parameters":[
{
"name":"timeout",
"description":"timeout in seconds",
"description":"timeout in millisecond",
"required":true,
"allowMultiple":false,
"type":"double",
"type":"long",
"paramType":"query"
}
]
@@ -301,8 +301,8 @@
"operations":[
{
"method":"GET",
"summary":"Get counter write rpc timeout in seconds",
"type":"double",
"summary":"Get counter write rpc timeout",
"type":"long",
"nickname":"get_counter_write_rpc_timeout",
"produces":[
"application/json"
@@ -322,10 +322,10 @@
"parameters":[
{
"name":"timeout",
"description":"timeout in seconds",
"description":"timeout in millisecond",
"required":true,
"allowMultiple":false,
"type":"double",
"type":"long",
"paramType":"query"
}
]
@@ -337,8 +337,8 @@
"operations":[
{
"method":"GET",
"summary":"Get CAS contention timeout in seconds",
"type":"double",
"summary":"Get CAS contention timeout",
"type":"long",
"nickname":"get_cas_contention_timeout",
"produces":[
"application/json"
@@ -358,10 +358,10 @@
"parameters":[
{
"name":"timeout",
"description":"timeout in second",
"description":"timeout in millisecond",
"required":true,
"allowMultiple":false,
"type":"double",
"type":"long",
"paramType":"query"
}
]
@@ -373,8 +373,8 @@
"operations":[
{
"method":"GET",
"summary":"Get range rpc timeout in seconds",
"type":"double",
"summary":"Get range rpc timeout",
"type":"long",
"nickname":"get_range_rpc_timeout",
"produces":[
"application/json"
@@ -394,10 +394,10 @@
"parameters":[
{
"name":"timeout",
"description":"timeout in second",
"description":"timeout in millisecond",
"required":true,
"allowMultiple":false,
"type":"double",
"type":"long",
"paramType":"query"
}
]
@@ -409,8 +409,8 @@
"operations":[
{
"method":"GET",
"summary":"Get truncate rpc timeout in seconds",
"type":"double",
"summary":"Get truncate rpc timeout",
"type":"long",
"nickname":"get_truncate_rpc_timeout",
"produces":[
"application/json"
@@ -430,10 +430,10 @@
"parameters":[
{
"name":"timeout",
"description":"timeout in second",
"description":"timeout in millisecond",
"required":true,
"allowMultiple":false,
"type":"double",
"type":"long",
"paramType":"query"
}
]
@@ -717,7 +717,7 @@
]
},
{
"path": "/storage_proxy/metrics/read/histogram",
"path": "/storage_proxy/metrics/read/latency/histogram",
"operations": [
{
"method": "GET",
@@ -732,7 +732,7 @@
]
},
{
"path": "/storage_proxy/metrics/range/histogram",
"path": "/storage_proxy/metrics/range/latency/histogram",
"operations": [
{
"method": "GET",
@@ -807,7 +807,7 @@
]
},
{
"path": "/storage_proxy/metrics/write/histogram",
"path": "/storage_proxy/metrics/write/latency/histogram",
"operations": [
{
"method": "GET",
@@ -820,103 +820,7 @@
"parameters": []
}
]
},
{
"path":"/storage_proxy/metrics/read/estimated_histogram/",
"operations":[
{
"method":"GET",
"summary":"Get read estimated latency",
"$ref":"#/utils/estimated_histogram",
"nickname":"get_read_estimated_histogram",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/storage_proxy/metrics/read",
"operations":[
{
"method":"GET",
"summary":"Get read latency",
"type":"int",
"nickname":"get_read_latency",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/storage_proxy/metrics/write/estimated_histogram/",
"operations":[
{
"method":"GET",
"summary":"Get write estimated latency",
"$ref":"#/utils/estimated_histogram",
"nickname":"get_write_estimated_histogram",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/storage_proxy/metrics/write",
"operations":[
{
"method":"GET",
"summary":"Get write latency",
"type":"int",
"nickname":"get_write_latency",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/storage_proxy/metrics/range/estimated_histogram/",
"operations":[
{
"method":"GET",
"summary":"Get range estimated latency",
"$ref":"#/utils/estimated_histogram",
"nickname":"get_range_estimated_histogram",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/storage_proxy/metrics/range",
"operations":[
{
"method":"GET",
"summary":"Get range latency",
"type":"int",
"nickname":"get_range_latency",
"produces":[
"application/json"
],
"parameters":[
]
}
]
}
}
],
"models":{
"mapper_list":{

View File

@@ -290,25 +290,6 @@
}
]
},
{
"path":"/storage_service/describe_ring/",
"operations":[
{
"method":"GET",
"summary":"The TokenRange for a any keyspace",
"type":"array",
"items":{
"type":"token_range"
},
"nickname":"describe_any_ring",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/storage_service/describe_ring/{keyspace}",
"operations":[
@@ -317,9 +298,9 @@
"summary":"The TokenRange for a given keyspace",
"type":"array",
"items":{
"type":"token_range"
"type":"string"
},
"nickname":"describe_ring",
"nickname":"describe_ring_jmx",
"produces":[
"application/json"
],
@@ -330,7 +311,7 @@
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
"paramType":"query"
}
]
}
@@ -425,7 +406,7 @@
"summary":"load value. Keys are IP addresses",
"type":"array",
"items":{
"type":"map_string_double"
"type":"mapper"
},
"nickname":"get_load_map",
"produces":[
@@ -628,7 +609,7 @@
"path":"/storage_service/keyspace_cleanup/{keyspace}",
"operations":[
{
"method":"POST",
"method":"GET",
"summary":"Trigger a cleanup of keys on a single keyspace",
"type":"int",
"nickname":"force_keyspace_cleanup",
@@ -797,72 +778,8 @@
"paramType":"path"
},
{
"name":"primaryRange",
"description":"If the value is the string 'true' with any capitalization, repair only the first range returned by the partitioner.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"parallelism",
"description":"Repair parallelism, can be 0 (sequential), 1 (parallel) or 2 (datacenter-aware).",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"incremental",
"description":"If the value is the string 'true' with any capitalization, perform incremental repair.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"jobThreads",
"description":"An integer specifying the parallelism on each node.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"ranges",
"description":"An explicit list of ranges to repair, overriding the default choice. Each range is expressed as token1:token2, and multiple ranges can be given as a comma separated list.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"columnFamilies",
"description":"Which column families to repair in the given keyspace. Multiple columns families can be named separated by commas. If this option is missing, all column families in the keyspace are repaired.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"dataCenters",
"description":"Which data centers are to participate in this repair. Multiple data centers can be listed separated by commas.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"hosts",
"description":"Which hosts are to participate in this repair. Multiple hosts can be listed separated by commas.",
"required":false,
"allowMultiple":false,
"type":"string",
"paramType":"query"
},
{
"name":"trace",
"description":"If the value is the string 'true' with any capitalization, enable tracing of the repair.",
"name":"options",
"description":"Options for the repair",
"required":false,
"allowMultiple":false,
"type":"string",
@@ -973,8 +890,8 @@
],
"parameters":[
{
"name":"host_id",
"description":"Remove the node with host_id from the cluster",
"name":"token",
"description":"The token to remove",
"required":true,
"allowMultiple":false,
"type":"string",
@@ -2028,20 +1945,6 @@
}
}
},
"map_string_double":{
"id":"map_string_double",
"description":"A key value mapping between a string and a double",
"properties":{
"key":{
"type":"string",
"description":"The key"
},
"value":{
"type":"double",
"description":"The value"
}
}
},
"maplist_mapper":{
"id":"maplist_mapper",
"description":"A key value mapping, where key and value are list",
@@ -2066,9 +1969,9 @@
"id":"snapshot",
"description":"Snapshot detail",
"properties":{
"ks":{
"key":{
"type":"string",
"description":"The key space snapshot key"
"description":"The key snapshot key"
},
"cf":{
"type":"string",
@@ -2090,7 +1993,7 @@
"properties":{
"key":{
"type":"string",
"description":"The snapshot key"
"description":"The keyspace"
},
"value":{
"type":"array",
@@ -2100,59 +2003,6 @@
"description":"The column family"
}
}
},
"endpoint_detail":{
"id":"endpoint_detail",
"description":"Endpoint detail",
"properties":{
"host":{
"type":"string",
"description":"The endpoint host"
},
"datacenter":{
"type":"string",
"description":"The endpoint datacenter"
},
"rack":{
"type":"string",
"description":"The endpoint rack"
}
}
},
"token_range":{
"id":"token_range",
"description":"Endpoint range information",
"properties":{
"start_token":{
"type":"string",
"description":"The range start token"
},
"end_token":{
"type":"string",
"description":"The range start token"
},
"endpoints":{
"type":"array",
"items":{
"type":"string"
},
"description":"The endpoints"
},
"rpc_endpoints":{
"type":"array",
"items":{
"type":"string"
},
"description":"The rpc endpoints"
},
"endpoint_details":{
"type":"array",
"items":{
"type":"endpoint_detail"
},
"description":"The endpoint details"
}
}
}
}
}

View File

@@ -25,102 +25,6 @@
]
}
]
},
{
"path":"/stream_manager/metrics/outbound",
"operations":[
{
"method":"GET",
"summary":"Get number of active outbound streams",
"type":"int",
"nickname":"get_all_active_streams_outbound",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/stream_manager/metrics/incoming/{peer}",
"operations":[
{
"method":"GET",
"summary":"Get total incoming bytes",
"type":"int",
"nickname":"get_total_incoming_bytes",
"produces":[
"application/json"
],
"parameters":[
{
"name":"peer",
"description":"The stream peer",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
}
]
}
]
},
{
"path":"/stream_manager/metrics/incoming",
"operations":[
{
"method":"GET",
"summary":"Get all total incoming bytes",
"type":"int",
"nickname":"get_all_total_incoming_bytes",
"produces":[
"application/json"
],
"parameters":[
]
}
]
},
{
"path":"/stream_manager/metrics/outgoing/{peer}",
"operations":[
{
"method":"GET",
"summary":"Get total outgoing bytes",
"type":"int",
"nickname":"get_total_outgoing_bytes",
"produces":[
"application/json"
],
"parameters":[
{
"name":"peer",
"description":"The stream peer",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
}
]
}
]
},
{
"path":"/stream_manager/metrics/outgoing",
"operations":[
{
"method":"GET",
"summary":"Get all total outgoing bytes",
"type":"int",
"nickname":"get_all_total_outgoing_bytes",
"produces":[
"application/json"
],
"parameters":[
]
}
]
}
],
"models":{

View File

@@ -1,114 +0,0 @@
{
"apiVersion":"0.0.1",
"swaggerVersion":"1.2",
"basePath":"{{Protocol}}://{{Host}}",
"resourcePath":"/system",
"produces":[
"application/json"
],
"apis":[
{
"path":"/system/logger",
"operations":[
{
"method":"GET",
"summary":"Get all logger names",
"type":"array",
"items":{
"type":"string"
},
"nickname":"get_all_logger_names",
"produces":[
"application/json"
],
"parameters":[
]
},
{
"method":"POST",
"summary":"Set all logger level",
"type":"void",
"nickname":"set_all_logger_level",
"produces":[
"application/json"
],
"parameters":[
{
"name":"level",
"description":"The new log level",
"required":true,
"allowMultiple":false,
"type":"string",
"enum":[
"error",
"warn",
"info",
"debug",
"trace"
],
"paramType":"query"
}
]
}
]
},
{
"path":"/system/logger/{name}",
"operations":[
{
"method":"GET",
"summary":"Get logger level",
"type":"string",
"nickname":"get_logger_level",
"produces":[
"application/json"
],
"parameters":[
{
"name":"name",
"description":"The logger to query about",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
}
]
},
{
"method":"POST",
"summary":"Set logger level",
"type":"void",
"nickname":"set_logger_level",
"produces":[
"application/json"
],
"parameters":[
{
"name":"name",
"description":"The logger to query about",
"required":true,
"allowMultiple":false,
"type":"string",
"paramType":"path"
},
{
"name":"level",
"description":"The new log level",
"required":true,
"allowMultiple":false,
"type":"string",
"enum":[
"error",
"warn",
"info",
"debug",
"trace"
],
"paramType":"query"
}
]
}
]
}
]
}

View File

@@ -38,7 +38,6 @@
#include "hinted_handoff.hh"
#include "http/exception.hh"
#include "stream_manager.hh"
#include "system.hh"
namespace api {
@@ -109,10 +108,6 @@ future<> set_server(http_context& ctx) {
rb->register_function(r, "stream_manager",
"The stream manager API");
set_stream_manager(ctx, r);
rb->register_function(r, "system",
"The system related API");
set_system(ctx, r);
});
}

View File

@@ -128,54 +128,47 @@ inline double pow2(double a) {
return a * a;
}
// FIXME: Move to utils::ihistogram::operator+=()
inline utils::ihistogram add_histogram(utils::ihistogram res,
inline httpd::utils_json::histogram add_histogram(httpd::utils_json::histogram res,
const utils::ihistogram& val) {
if (res.count == 0) {
return val;
if (!res.count._set) {
res = val;
return res;
}
if (val.count == 0) {
return std::move(res);
return res;
}
if (res.min > val.min) {
if (res.min() > val.min) {
res.min = val.min;
}
if (res.max < val.max) {
if (res.max() < val.max) {
res.max = val.max;
}
double ncount = res.count + val.count;
double ncount = res.count() + val.count;
// To get an estimated sum we take the estimated mean
// and multiply it by the true count
res.sum = res.sum + val.mean * val.count;
double a = res.count/ncount;
res.sum = res.sum() + val.mean * val.count;
double a = res.count()/ncount;
double b = val.count/ncount;
double mean = a * res.mean + b * val.mean;
double mean = a * res.mean() + b * val.mean;
res.variance = (res.variance + pow2(res.mean - mean) )* a +
res.variance = (res.variance() + pow2(res.mean() - mean) )* a +
(val.variance + pow2(val.mean -mean))* b;
res.mean = mean;
res.count = res.count + val.count;
res.count = res.count() + val.count;
for (auto i : val.sample) {
res.sample.push_back(i);
res.sample.push(i);
}
return res;
}
inline
httpd::utils_json::histogram to_json(const utils::ihistogram& val) {
httpd::utils_json::histogram h;
h = val;
return h;
}
template<class T, class F>
future<json::json_return_type> sum_histogram_stats(distributed<T>& d, utils::ihistogram F::*f) {
return d.map_reduce0([f](const T& p) {return p.get_stats().*f;}, utils::ihistogram(),
add_histogram).then([](const utils::ihistogram& val) {
return make_ready_future<json::json_return_type>(to_json(val));
return d.map_reduce0([f](const T& p) {return p.get_stats().*f;}, httpd::utils_json::histogram(),
add_histogram).then([](const httpd::utils_json::histogram& val) {
return make_ready_future<json::json_return_type>(val);
});
}

View File

@@ -64,21 +64,21 @@ future<> foreach_column_family(http_context& ctx, const sstring& name, function<
future<json::json_return_type> get_cf_stats(http_context& ctx, const sstring& name,
int64_t column_family::stats::*f) {
return map_reduce_cf(ctx, name, int64_t(0), [f](const column_family& cf) {
return map_reduce_cf(ctx, name, 0, [f](const column_family& cf) {
return cf.get_stats().*f;
}, std::plus<int64_t>());
}
future<json::json_return_type> get_cf_stats(http_context& ctx,
int64_t column_family::stats::*f) {
return map_reduce_cf(ctx, int64_t(0), [f](const column_family& cf) {
return map_reduce_cf(ctx, 0, [f](const column_family& cf) {
return cf.get_stats().*f;
}, std::plus<int64_t>());
}
static future<json::json_return_type> get_cf_stats_count(http_context& ctx, const sstring& name,
utils::ihistogram column_family::stats::*f) {
return map_reduce_cf(ctx, name, int64_t(0), [f](const column_family& cf) {
return map_reduce_cf(ctx, name, 0, [f](const column_family& cf) {
return (cf.get_stats().*f).count;
}, std::plus<int64_t>());
}
@@ -101,7 +101,7 @@ static future<json::json_return_type> get_cf_stats_sum(http_context& ctx, const
static future<json::json_return_type> get_cf_stats_count(http_context& ctx,
utils::ihistogram column_family::stats::*f) {
return map_reduce_cf(ctx, int64_t(0), [f](const column_family& cf) {
return map_reduce_cf(ctx, 0, [f](const column_family& cf) {
return (cf.get_stats().*f).count;
}, std::plus<int64_t>());
}
@@ -110,30 +110,28 @@ static future<json::json_return_type> get_cf_histogram(http_context& ctx, const
utils::ihistogram column_family::stats::*f) {
utils::UUID uuid = get_uuid(name, ctx.db.local());
return ctx.db.map_reduce0([f, uuid](const database& p) {return p.find_column_family(uuid).get_stats().*f;},
utils::ihistogram(),
httpd::utils_json::histogram(),
add_histogram)
.then([](const utils::ihistogram& val) {
return make_ready_future<json::json_return_type>(to_json(val));
.then([](const httpd::utils_json::histogram& val) {
return make_ready_future<json::json_return_type>(val);
});
}
static future<json::json_return_type> get_cf_histogram(http_context& ctx, utils::ihistogram column_family::stats::*f) {
std::function<utils::ihistogram(const database&)> fun = [f] (const database& db) {
utils::ihistogram res;
std::function<httpd::utils_json::histogram(const database&)> fun = [f] (const database& db) {
httpd::utils_json::histogram res;
for (auto i : db.get_column_families()) {
res = add_histogram(res, i.second->get_stats().*f);
}
return res;
};
return ctx.db.map(fun).then([](const std::vector<utils::ihistogram> &res) {
std::vector<httpd::utils_json::histogram> r;
boost::copy(res | boost::adaptors::transformed(to_json), std::back_inserter(r));
return make_ready_future<json::json_return_type>(r);
return ctx.db.map(fun).then([](const std::vector<httpd::utils_json::histogram> &res) {
return make_ready_future<json::json_return_type>(res);
});
}
static future<json::json_return_type> get_cf_unleveled_sstables(http_context& ctx, const sstring& name) {
return map_reduce_cf(ctx, name, int64_t(0), [](const column_family& cf) {
return map_reduce_cf(ctx, name, 0, [](const column_family& cf) {
return cf.get_unleveled_sstables();
}, std::plus<int64_t>());
}
@@ -223,25 +221,25 @@ void set_column_family(http_context& ctx, routes& r) {
});
cf::get_memtable_off_heap_size.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, req->param["name"], 0, [](column_family& cf) {
return cf.active_memtable().region().occupancy().total_space();
}, std::plus<int64_t>());
});
cf::get_all_memtable_off_heap_size.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, 0, [](column_family& cf) {
return cf.active_memtable().region().occupancy().total_space();
}, std::plus<int64_t>());
});
cf::get_memtable_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, req->param["name"], 0, [](column_family& cf) {
return cf.active_memtable().region().occupancy().used_space();
}, std::plus<int64_t>());
});
cf::get_all_memtable_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, 0, [](column_family& cf) {
return cf.active_memtable().region().occupancy().used_space();
}, std::plus<int64_t>());
});
@@ -256,7 +254,7 @@ void set_column_family(http_context& ctx, routes& r) {
cf::get_cf_all_memtables_off_heap_size.set(r, [&ctx] (std::unique_ptr<request> req) {
warn(unimplemented::cause::INDEXES);
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, req->param["name"], 0, [](column_family& cf) {
return cf.occupancy().total_space();
}, std::plus<int64_t>());
});
@@ -265,21 +263,21 @@ void set_column_family(http_context& ctx, routes& r) {
warn(unimplemented::cause::INDEXES);
return ctx.db.map_reduce0([](const database& db){
return db.dirty_memory_region_group().memory_used();
}, int64_t(0), std::plus<int64_t>()).then([](int res) {
}, 0, std::plus<int64_t>()).then([](int res) {
return make_ready_future<json::json_return_type>(res);
});
});
cf::get_cf_all_memtables_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
warn(unimplemented::cause::INDEXES);
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, req->param["name"], 0, [](column_family& cf) {
return cf.occupancy().used_space();
}, std::plus<int64_t>());
});
cf::get_all_cf_all_memtables_live_data_size.set(r, [&ctx] (std::unique_ptr<request> req) {
warn(unimplemented::cause::INDEXES);
return map_reduce_cf(ctx, int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, 0, [](column_family& cf) {
return cf.active_memtable().region().occupancy().used_space();
}, std::plus<int64_t>());
});
@@ -304,7 +302,7 @@ void set_column_family(http_context& ctx, routes& r) {
});
cf::get_estimated_row_count.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](column_family& cf) {
return map_reduce_cf(ctx, req->param["name"], 0, [](column_family& cf) {
uint64_t res = 0;
for (auto i: *cf.get_sstables() ) {
res += i.second->get_stats_metadata().estimated_row_size.count();
@@ -372,7 +370,7 @@ void set_column_family(http_context& ctx, routes& r) {
});
cf::get_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::writes);
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::reads);
});
cf::get_all_write_latency_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
@@ -424,11 +422,11 @@ void set_column_family(http_context& ctx, routes& r) {
});
cf::get_max_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], int64_t(0), max_row_size, max_int64);
return map_reduce_cf(ctx, req->param["name"], 0, max_row_size, max_int64);
});
cf::get_all_max_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, int64_t(0), max_row_size, max_int64);
return map_reduce_cf(ctx, 0, max_row_size, max_int64);
});
cf::get_mean_row_size.set(r, [&ctx] (std::unique_ptr<request> req) {
@@ -539,20 +537,20 @@ void set_column_family(http_context& ctx, routes& r) {
}, std::plus<uint64_t>());
});
cf::get_index_summary_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], uint64_t(0), [] (column_family& cf) {
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
return sst.second->get_summary().memory_footprint();
});
}, std::plus<uint64_t>());
cf::get_index_summary_off_heap_memory_used.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
// We are missing the off heap memory calculation
// Return 0 is the wrong value. It's a work around
// until the memory calculation will be available
//auto id = get_uuid(req->param["name"], ctx.db.local());
return make_ready_future<json::json_return_type>(0);
});
cf::get_all_index_summary_off_heap_memory_used.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, uint64_t(0), [] (column_family& cf) {
return std::accumulate(cf.get_sstables()->begin(), cf.get_sstables()->end(), uint64_t(0), [](uint64_t s, auto& sst) {
return sst.second->get_summary().memory_footprint();
});
}, std::plus<uint64_t>());
cf::get_all_index_summary_off_heap_memory_used.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
cf::get_compression_metadata_off_heap_memory_used.set(r, [] (std::unique_ptr<request> req) {
@@ -591,16 +589,11 @@ void set_column_family(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(0);
});
cf::get_true_snapshots_size.set(r, [&ctx] (std::unique_ptr<request> req) {
auto uuid = get_uuid(req->param["name"], ctx.db.local());
return ctx.db.local().find_column_family(uuid).get_snapshot_details().then([](
const std::unordered_map<sstring, column_family::snapshot_details>& sd) {
int64_t res = 0;
for (auto i : sd) {
res += i.second.total;
}
return make_ready_future<json::json_return_type>(res);
});
cf::get_true_snapshots_size.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
//auto id = get_uuid(req->param["name"], ctx.db.local());
return make_ready_future<json::json_return_type>(0);
});
cf::get_all_true_snapshots_size.set(r, [] (std::unique_ptr<request> req) {
@@ -622,29 +615,30 @@ void set_column_family(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(0);
});
cf::get_row_cache_hit.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](const column_family& cf) {
return cf.get_row_cache().stats().hits;
}, std::plus<int64_t>());
cf::get_row_cache_hit.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
//auto id = get_uuid(req->param["name"], ctx.db.local());
return make_ready_future<json::json_return_type>(0);
});
cf::get_all_row_cache_hit.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, int64_t(0), [](const column_family& cf) {
return cf.get_row_cache().stats().hits;
}, std::plus<int64_t>());
cf::get_all_row_cache_hit.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
cf::get_row_cache_miss.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], int64_t(0), [](const column_family& cf) {
return cf.get_row_cache().stats().misses;
}, std::plus<int64_t>());
cf::get_row_cache_miss.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
//auto id = get_uuid(req->param["name"], ctx.db.local());
return make_ready_future<json::json_return_type>(0);
});
cf::get_all_row_cache_miss.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, int64_t(0), [](const column_family& cf) {
return cf.get_row_cache().stats().misses;
}, std::plus<int64_t>());
cf::get_all_row_cache_miss.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
cf::get_cas_prepare.set(r, [] (std::unique_ptr<request> req) {
@@ -668,19 +662,41 @@ void set_column_family(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(0);
});
cf::get_sstables_per_read_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
return map_reduce_cf(ctx, req->param["name"], sstables::estimated_histogram(0), [](column_family& cf) {
return cf.get_stats().estimated_sstable_per_read;
},
sstables::merge, utils_json::estimated_histogram());
cf::get_sstables_per_read_histogram.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
//auto id = get_uuid(req->param["name"], ctx.db.local());
std::vector<double> res;
return make_ready_future<json::json_return_type>(res);
});
cf::get_tombstone_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::tombstone_scanned);
cf::get_tombstone_scanned_histogram.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
//auto id = get_uuid(req->param["name"], ctx.db.local());
httpd::utils_json::histogram res;
res.count = 0;
res.mean = 0;
res.max = 0;
res.min = 0;
res.sum = 0;
res.variance = 0;
return make_ready_future<json::json_return_type>(res);
});
cf::get_live_scanned_histogram.set(r, [&ctx] (std::unique_ptr<request> req) {
return get_cf_histogram(ctx, req->param["name"], &column_family::stats::live_scanned);
cf::get_live_scanned_histogram.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
//auto id = get_uuid(req->param["name"], ctx.db.local());
//std::vector<double> res;
httpd::utils_json::histogram res;
res.count = 0;
res.mean = 0;
res.max = 0;
res.min = 0;
res.sum = 0;
res.variance = 0;
return make_ready_future<json::json_return_type>(res);
});
cf::get_col_update_time_delta_histogram.set(r, [] (std::unique_ptr<request> req) {

View File

@@ -21,17 +21,16 @@
#include "compaction_manager.hh"
#include "api/api-doc/compaction_manager.json.hh"
#include "db/system_keyspace.hh"
namespace api {
using namespace scollectd;
namespace cm = httpd::compaction_manager_json;
using namespace json;
static future<json::json_return_type> get_cm_stats(http_context& ctx,
int64_t compaction_manager::stats::*f) {
return ctx.db.map_reduce0([f](database& db) {
return ctx.db.map_reduce0([&](database& db) {
return db.get_compaction_manager().get_stats().*f;
}, int64_t(0), std::plus<int64_t>()).then([](const int64_t& res) {
return make_ready_future<json::json_return_type>(res);
@@ -39,38 +38,29 @@ static future<json::json_return_type> get_cm_stats(http_context& ctx,
}
void set_compaction_manager(http_context& ctx, routes& r) {
cm::get_compactions.set(r, [&ctx] (std::unique_ptr<request> req) {
return ctx.db.map_reduce0([](database& db) {
std::vector<cm::summary> summaries;
const compaction_manager& cm = db.get_compaction_manager();
cm::get_compactions.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
std::vector<cm::jsonmap> map;
return make_ready_future<json::json_return_type>(map);
});
for (const auto& c : cm.get_compactions()) {
cm::summary s;
s.ks = c->ks;
s.cf = c->cf;
s.unit = "keys";
s.task_type = "compaction";
s.completed = c->total_keys_written;
s.total = c->total_partitions;
summaries.push_back(std::move(s));
}
return summaries;
}, std::vector<cm::summary>(), concat<cm::summary>).then([](const std::vector<cm::summary>& res) {
return make_ready_future<json::json_return_type>(res);
});
cm::get_compaction_summary.set(r, [] (std::unique_ptr<request> req) {
//TBD
unimplemented();
std::vector<sstring> res;
return make_ready_future<json::json_return_type>(res);
});
cm::force_user_defined_compaction.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
warn(unimplemented::cause::API);
return make_ready_future<json::json_return_type>(json_void());
unimplemented();
return make_ready_future<json::json_return_type>("");
});
cm::stop_compaction.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
warn(unimplemented::cause::API);
unimplemented();
return make_ready_future<json::json_return_type>("");
});
@@ -91,44 +81,11 @@ void set_compaction_manager(http_context& ctx, routes& r) {
cm::get_bytes_compacted.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
warn(unimplemented::cause::API);
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
cm::get_compaction_history.set(r, [] (std::unique_ptr<request> req) {
return db::system_keyspace::get_compaction_history().then([] (std::vector<db::system_keyspace::compaction_history_entry> history) {
std::vector<cm::history> res;
res.reserve(history.size());
for (auto& entry : history) {
cm::history h;
h.id = entry.id.to_sstring();
h.ks = std::move(entry.ks);
h.cf = std::move(entry.cf);
h.compacted_at = entry.compacted_at;
h.bytes_in = entry.bytes_in;
h.bytes_out = entry.bytes_out;
for (auto it : entry.rows_merged) {
httpd::compaction_manager_json::row_merged e;
e.key = it.first;
e.value = it.second;
h.rows_merged.push(std::move(e));
}
res.push_back(std::move(h));
}
return make_ready_future<json::json_return_type>(res);
});
});
cm::get_compaction_info.set(r, [] (std::unique_ptr<request> req) {
//TBD
// FIXME
warn(unimplemented::cause::API);
std::vector<cm::compaction_info> res;
return make_ready_future<json::json_return_type>(res);
});
}

View File

@@ -22,33 +22,15 @@
#include "failure_detector.hh"
#include "api/api-doc/failure_detector.json.hh"
#include "gms/failure_detector.hh"
#include "gms/application_state.hh"
#include "gms/gossiper.hh"
namespace api {
namespace fd = httpd::failure_detector_json;
void set_failure_detector(http_context& ctx, routes& r) {
fd::get_all_endpoint_states.set(r, [](std::unique_ptr<request> req) {
std::vector<fd::endpoint_state> res;
for (auto i : gms::get_local_gossiper().endpoint_state_map) {
fd::endpoint_state val;
val.addrs = boost::lexical_cast<std::string>(i.first);
val.is_alive = i.second.is_alive();
val.generation = i.second.get_heart_beat_state().get_generation();
val.version = i.second.get_heart_beat_state().get_heart_beat_version();
val.update_time = i.second.get_update_timestamp().time_since_epoch().count();
for (auto a : i.second.get_application_state_map()) {
fd::version_value version_val;
// We return the enum index and not it's name to stay compatible to origin
// method that the state index are static but the name can be changed.
version_val.application_state = static_cast<std::underlying_type<gms::application_state>::type>(a.first);
version_val.value = a.second.value;
val.application_state.push(version_val);
}
res.push_back(val);
}
return make_ready_future<json::json_return_type>(res);
return gms::get_all_endpoint_states().then([](const sstring& str) {
return make_ready_future<json::json_return_type>(str);
});
});
fd::get_up_endpoint_count.set(r, [](std::unique_ptr<request> req) {

View File

@@ -27,43 +27,47 @@ namespace api {
using namespace json;
void set_gossiper(http_context& ctx, routes& r) {
httpd::gossiper_json::get_down_endpoint.set(r, [] (const_req req) {
auto res = gms::get_local_gossiper().get_unreachable_members();
return container_to_vec(res);
httpd::gossiper_json::get_down_endpoint.set(r, [](std::unique_ptr<request> req) {
return gms::get_unreachable_members().then([](std::set<gms::inet_address> res) {
return make_ready_future<json::json_return_type>(container_to_vec(res));
});
});
httpd::gossiper_json::get_live_endpoint.set(r, [] (const_req req) {
auto res = gms::get_local_gossiper().get_live_members();
return container_to_vec(res);
httpd::gossiper_json::get_live_endpoint.set(r, [](std::unique_ptr<request> req) {
return gms::get_live_members().then([](std::set<gms::inet_address> res) {
return make_ready_future<json::json_return_type>(container_to_vec(res));
});
});
httpd::gossiper_json::get_endpoint_downtime.set(r, [] (const_req req) {
gms::inet_address ep(req.param["addr"]);
return gms::get_local_gossiper().get_endpoint_downtime(ep);
});
httpd::gossiper_json::get_current_generation_number.set(r, [] (std::unique_ptr<request> req) {
httpd::gossiper_json::get_endpoint_downtime.set(r, [](std::unique_ptr<request> req) {
gms::inet_address ep(req->param["addr"]);
return gms::get_local_gossiper().get_current_generation_number(ep).then([] (int res) {
return gms::get_endpoint_downtime(ep).then([](int64_t res) {
return make_ready_future<json::json_return_type>(res);
});
});
httpd::gossiper_json::get_current_heart_beat_version.set(r, [] (std::unique_ptr<request> req) {
httpd::gossiper_json::get_current_generation_number.set(r, [](std::unique_ptr<request> req) {
gms::inet_address ep(req->param["addr"]);
return gms::get_local_gossiper().get_current_heart_beat_version(ep).then([] (int res) {
return gms::get_current_generation_number(ep).then([](int res) {
return make_ready_future<json::json_return_type>(res);
});
});
httpd::gossiper_json::get_current_heart_beat_version.set(r, [](std::unique_ptr<request> req) {
gms::inet_address ep(req->param["addr"]);
return gms::get_current_heart_beat_version(ep).then([](int res) {
return make_ready_future<json::json_return_type>(res);
});
});
httpd::gossiper_json::assassinate_endpoint.set(r, [](std::unique_ptr<request> req) {
if (req->get_query_param("unsafe") != "True") {
return gms::get_local_gossiper().assassinate_endpoint(req->param["addr"]).then([] {
return make_ready_future<json::json_return_type>(json_void());
return gms::assassinate_endpoint(req->param["addr"]).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
}
return gms::get_local_gossiper().unsafe_assassinate_endpoint(req->param["addr"]).then([] {
return make_ready_future<json::json_return_type>(json_void());
return gms::unsafe_assassinate_endpoint(req->param["addr"]).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
});
}

View File

@@ -32,17 +32,17 @@ using namespace net;
namespace api {
using shard_info = messaging_service::shard_info;
using msg_addr = messaging_service::msg_addr;
using shard_id = messaging_service::shard_id;
static const int32_t num_verb = static_cast<int32_t>(messaging_verb::LAST);
static const int32_t num_verb = static_cast<int32_t>(messaging_verb::UNUSED_3) + 1;
std::vector<message_counter> map_to_message_counters(
const std::unordered_map<gms::inet_address, unsigned long>& map) {
std::vector<message_counter> res;
for (auto i : map) {
res.push_back(message_counter());
res.back().key = boost::lexical_cast<sstring>(i.first);
res.back().value = i.second;
res.back().ip = boost::lexical_cast<sstring>(i.first);
res.back().count = i.second;
}
return res;
}
@@ -58,7 +58,7 @@ future_json_function get_client_getter(std::function<uint64_t(const shard_info&)
using map_type = std::unordered_map<gms::inet_address, uint64_t>;
auto get_shard_map = [f](messaging_service& ms) {
std::unordered_map<gms::inet_address, unsigned long> map;
ms.foreach_client([&map, f] (const msg_addr& id, const shard_info& info) {
ms.foreach_client([&map, f] (const shard_id& id, const shard_info& info) {
map[id.addr] = f(info);
});
return map;
@@ -70,39 +70,12 @@ future_json_function get_client_getter(std::function<uint64_t(const shard_info&)
};
}
future_json_function get_server_getter(std::function<uint64_t(const rpc::stats&)> f) {
return [f](std::unique_ptr<request> req) {
using map_type = std::unordered_map<gms::inet_address, uint64_t>;
auto get_shard_map = [f](messaging_service& ms) {
std::unordered_map<gms::inet_address, unsigned long> map;
ms.foreach_server_connection_stats([&map, f] (const rpc::client_info& info, const rpc::stats& stats) mutable {
map[gms::inet_address(net::ipv4_address(info.addr))] = f(stats);
});
return map;
};
return get_messaging_service().map_reduce0(get_shard_map, map_type(), map_sum<map_type>).
then([](map_type&& map) {
return make_ready_future<json::json_return_type>(map_to_message_counters(map));
});
};
}
void set_messaging_service(http_context& ctx, routes& r) {
get_timeout_messages.set(r, get_client_getter([](const shard_info& c) {
return c.get_stats().timeout;
}));
get_sent_messages.set(r, get_client_getter([](const shard_info& c) {
return c.get_stats().sent_messages;
}));
get_dropped_messages.set(r, get_client_getter([](const shard_info& c) {
// We don't have the same drop message mechanism
// as origin has.
// hence we can always return 0
return 0;
}));
get_exception_messages.set(r, get_client_getter([](const shard_info& c) {
return c.get_stats().exception_received;
}));
@@ -111,20 +84,12 @@ void set_messaging_service(http_context& ctx, routes& r) {
return c.get_stats().pending;
}));
get_respond_pending_messages.set(r, get_server_getter([](const rpc::stats& c) {
return c.pending;
get_respond_pending_messages.set(r, get_client_getter([](const shard_info& c) {
return c.get_stats().wait_reply;
}));
get_respond_completed_messages.set(r, get_server_getter([](const rpc::stats& c) {
return c.sent_messages;
}));
get_version.set(r, [](const_req req) {
return net::get_local_messaging_service().get_raw_version(req.get_query_param("addr"));
});
get_dropped_messages_by_ver.set(r, [](std::unique_ptr<request> req) {
shared_ptr<std::vector<uint64_t>> map = make_shared<std::vector<uint64_t>>(num_verb);
get_dropped_messages.set(r, [](std::unique_ptr<request> req) {
shared_ptr<std::vector<uint64_t>> map = make_shared<std::vector<uint64_t>>(num_verb, 0);
return net::get_messaging_service().map_reduce([map](const uint64_t* local_map) mutable {
for (auto i = 0; i < num_verb; i++) {
@@ -137,12 +102,8 @@ void set_messaging_service(http_context& ctx, routes& r) {
for (auto i : verb_counter::verb_wrapper::all_items()) {
verb_counter c;
messaging_verb v = i; // for type safety we use messaging_verb values
auto idx = static_cast<uint32_t>(v);
if (idx >= map->size()) {
throw std::runtime_error(sprint("verb index out of bounds: %lu, map size: %lu", idx, map->size()));
}
if ((*map)[idx] > 0) {
c.count = (*map)[idx];
if ((*map)[static_cast<int32_t>(v)] > 0) {
c.count = (*map)[static_cast<int32_t>(v)];
c.verb = i;
res.push_back(c);
}

View File

@@ -23,9 +23,6 @@
#include "service/storage_proxy.hh"
#include "api/api-doc/storage_proxy.json.hh"
#include "api/api-doc/utils.json.hh"
#include "service/storage_service.hh"
#include "db/config.hh"
#include "utils/histogram.hh"
namespace api {
@@ -33,23 +30,6 @@ namespace sp = httpd::storage_proxy_json;
using proxy = service::storage_proxy;
using namespace json;
static future<json::json_return_type> sum_estimated_histogram(http_context& ctx, sstables::estimated_histogram proxy::stats::*f) {
return ctx.sp.map_reduce0([f](const proxy& p) {return p.get_stats().*f;}, sstables::estimated_histogram(),
sstables::merge).then([](const sstables::estimated_histogram& val) {
utils_json::estimated_histogram res;
res = val;
return make_ready_future<json::json_return_type>(res);
});
}
static future<json::json_return_type> total_latency(http_context& ctx, utils::ihistogram proxy::stats::*f) {
return ctx.sp.map_reduce0([f](const proxy& p) {return (p.get_stats().*f).mean * (p.get_stats().*f).count;}, 0.0,
std::plus<double>()).then([](double val) {
int64_t res = val;
return make_ready_future<json::json_return_type>(res);
});
}
void set_storage_proxy(http_context& ctx, routes& r) {
sp::get_total_hints.set(r, [](std::unique_ptr<request> req) {
//TBD
@@ -59,9 +39,7 @@ void set_storage_proxy(http_context& ctx, routes& r) {
sp::get_hinted_handoff_enabled.set(r, [](std::unique_ptr<request> req) {
//TBD
// FIXME
// hinted handoff is not supported currently,
// so we should return false
unimplemented();
return make_ready_future<json::json_return_type>(false);
});
@@ -118,8 +96,10 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(0);
});
sp::get_rpc_timeout.set(r, [&ctx](const_req req) {
return ctx.db.local().get_config().request_timeout_in_ms()/1000.0;
sp::get_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(1);
});
sp::set_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -129,8 +109,10 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
sp::get_read_rpc_timeout.set(r, [&ctx](const_req req) {
return ctx.db.local().get_config().read_request_timeout_in_ms()/1000.0;
sp::get_read_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::set_read_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -140,8 +122,10 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
sp::get_write_rpc_timeout.set(r, [&ctx](const_req req) {
return ctx.db.local().get_config().write_request_timeout_in_ms()/1000.0;
sp::get_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::set_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -151,10 +135,11 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
sp::get_counter_write_rpc_timeout.set(r, [&ctx](const_req req) {
return ctx.db.local().get_config().counter_write_request_timeout_in_ms()/1000.0;
sp::get_counter_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::set_counter_write_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
@@ -162,8 +147,10 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
sp::get_cas_contention_timeout.set(r, [&ctx](const_req req) {
return ctx.db.local().get_config().cas_contention_timeout_in_ms()/1000.0;
sp::get_cas_contention_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::set_cas_contention_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -173,8 +160,10 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
sp::get_range_rpc_timeout.set(r, [&ctx](const_req req) {
return ctx.db.local().get_config().range_request_timeout_in_ms()/1000.0;
sp::get_range_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::set_range_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -184,8 +173,10 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
sp::get_truncate_rpc_timeout.set(r, [&ctx](const_req req) {
return ctx.db.local().get_config().truncate_request_timeout_in_ms()/1000.0;
sp::get_truncate_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::set_truncate_rpc_timeout.set(r, [](std::unique_ptr<request> req) {
@@ -201,28 +192,28 @@ void set_storage_proxy(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
sp::get_read_repair_attempted.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_stats(ctx.sp, &proxy::stats::read_repair_attempts);
sp::get_read_repair_attempted.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::get_read_repair_repaired_blocking.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_stats(ctx.sp, &proxy::stats::read_repair_repaired_blocking);
sp::get_read_repair_repaired_blocking.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::get_read_repair_repaired_background.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_stats(ctx.sp, &proxy::stats::read_repair_repaired_background);
sp::get_read_repair_repaired_background.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
sp::get_schema_versions.set(r, [](std::unique_ptr<request> req) {
//TBD
// FIXME
// describe_schema_versions is not implemented yet
// this is a work around
unimplemented();
std::vector<sp::mapper_list> res;
sp::mapper_list entry;
entry.key = boost::lexical_cast<std::string>(utils::fb_utilities::get_broadcast_address());
entry.value.push(service::get_local_storage_service().get_schema_version());
res.push_back(entry);
return make_ready_future<json::json_return_type>(res);
});
@@ -325,29 +316,6 @@ void set_storage_proxy(http_context& ctx, routes& r) {
sp::get_read_metrics_latency_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_histogram_stats(ctx.sp, &proxy::stats::read);
});
sp::get_read_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_estimated_histogram(ctx, &proxy::stats::estimated_read);
});
sp::get_read_latency.set(r, [&ctx](std::unique_ptr<request> req) {
return total_latency(ctx, &proxy::stats::read);
});
sp::get_write_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_estimated_histogram(ctx, &proxy::stats::estimated_write);
});
sp::get_write_latency.set(r, [&ctx](std::unique_ptr<request> req) {
return total_latency(ctx, &proxy::stats::write);
});
sp::get_range_estimated_histogram.set(r, [&ctx](std::unique_ptr<request> req) {
return sum_histogram_stats(ctx.sp, &proxy::stats::read);
});
sp::get_range_latency.set(r, [&ctx](std::unique_ptr<request> req) {
return total_latency(ctx, &proxy::stats::range);
});
}
}

View File

@@ -30,6 +30,8 @@
#include "repair/repair.hh"
#include "locator/snitch_base.hh"
#include "column_family.hh"
#include <unordered_map>
#include "utils/fb_utilities.hh"
namespace api {
@@ -43,29 +45,6 @@ static sstring validate_keyspace(http_context& ctx, const parameters& param) {
throw bad_param_exception("Keyspace " + param["keyspace"] + " Does not exist");
}
// Convert the storage_service ring description for `keyspace` into the
// JSON-serializable ss::token_range representation used by the REST API.
// An empty rack string is treated as "unset" and left out of the reply.
static std::vector<ss::token_range> describe_ring(const sstring& keyspace) {
    std::vector<ss::token_range> res;
    // Iterate by const reference: each described range (and its endpoint
    // details) holds strings/vectors, so iterating by value copied them.
    for (const auto& d : service::get_local_storage_service().describe_ring(keyspace)) {
        ss::token_range r;
        r.start_token = d._start_token;
        r.end_token = d._end_token;
        r.endpoints = d._endpoints;
        r.rpc_endpoints = d._rpc_endpoints;
        for (const auto& det : d._endpoint_details) {
            ss::endpoint_detail ed;
            ed.host = det._host;
            ed.datacenter = det._datacenter;
            // Rack is optional; only emit it when the description has one.
            if (!det._rack.empty()) {
                ed.rack = det._rack;
            }
            r.endpoint_details.push(ed);
        }
        res.push_back(std::move(r));
    }
    return res;
}
void set_storage_service(http_context& ctx, routes& r) {
ss::local_hostid.set(r, [](std::unique_ptr<request> req) {
return db::system_keyspace::get_local_host_id().then([](const utils::UUID& id) {
@@ -73,25 +52,28 @@ void set_storage_service(http_context& ctx, routes& r) {
});
});
ss::get_tokens.set(r, [] (const_req req) {
auto tokens = service::get_local_storage_service().get_token_metadata().sorted_tokens();
return container_to_vec(tokens);
ss::get_tokens.set(r, [](std::unique_ptr<request> req) {
return service::sorted_tokens().then([](const std::vector<dht::token>& tokens) {
return make_ready_future<json::json_return_type>(container_to_vec(tokens));
});
});
ss::get_node_tokens.set(r, [] (const_req req) {
gms::inet_address addr(req.param["endpoint"]);
auto tokens = service::get_local_storage_service().get_token_metadata().get_tokens(addr);
return container_to_vec(tokens);
ss::get_node_tokens.set(r, [](std::unique_ptr<request> req) {
gms::inet_address addr(req->param["endpoint"]);
return service::get_tokens(addr).then([](const std::vector<dht::token>& tokens) {
return make_ready_future<json::json_return_type>(container_to_vec(tokens));
});
});
ss::get_commitlog.set(r, [&ctx](const_req req) {
return ctx.db.local().commitlog()->active_config().commit_log_location;
});
ss::get_token_endpoint.set(r, [] (const_req req) {
auto token_to_ep = service::get_local_storage_service().get_token_to_endpoint_map();
std::vector<storage_service_json::mapper> res;
return map_to_key_value(token_to_ep, res);
ss::get_token_endpoint.set(r, [](std::unique_ptr<request> req) {
return service::get_token_to_endpoint().then([] (const std::map<dht::token, gms::inet_address>& tokens){
std::vector<storage_service_json::mapper> res;
return make_ready_future<json::json_return_type>(map_to_key_value(tokens, res));
});
});
ss::get_leaving_nodes.set(r, [](const_req req) {
@@ -148,13 +130,12 @@ void set_storage_service(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(res);
});
ss::describe_any_ring.set(r, [&ctx](const_req req) {
return describe_ring("");
});
ss::describe_ring.set(r, [&ctx](const_req req) {
auto keyspace = validate_keyspace(ctx, req.param);
return describe_ring(keyspace);
ss::describe_ring_jmx.set(r, [&ctx](std::unique_ptr<request> req) {
//TBD
unimplemented();
auto keyspace = validate_keyspace(ctx, req->param);
std::vector<sstring> res;
return make_ready_future<json::json_return_type>(res);
});
ss::get_host_id_map.set(r, [](const_req req) {
@@ -167,88 +148,74 @@ void set_storage_service(http_context& ctx, routes& r) {
return get_cf_stats(ctx, &column_family::stats::live_disk_space_used);
});
ss::get_load_map.set(r, [] (std::unique_ptr<request> req) {
return service::get_local_storage_service().get_load_map().then([] (auto&& load_map) {
std::vector<ss::map_string_double> res;
for (auto i : load_map) {
ss::map_string_double val;
val.key = i.first;
val.value = i.second;
res.push_back(val);
ss::get_load_map.set(r, [&ctx](std::unique_ptr<request> req) {
// FIXME
// The function should return a mapping between inet address
// and the load (disk space used)
// in origin the implementation is based on the load broadcast
// we do not currently support.
// As a workaround, the local load is calculated (this part is similar
// to origin) and a map with a single entry is return.
return ctx.db.map_reduce0([](database& db) {
int64_t res = 0;
for (auto i : db.get_column_families()) {
res += i.second->get_stats().live_disk_space_used;
}
return make_ready_future<json::json_return_type>(res);
return res;
}, 0, std::plus<int64_t>()).then([](int64_t size) {
std::vector<ss::mapper> res;
std::unordered_map<gms::inet_address, double> load_map;
load_map[utils::fb_utilities::get_broadcast_address()] = size;
return make_ready_future<json::json_return_type>(map_to_key_value(load_map, res));
});
});
ss::get_current_generation_number.set(r, [](std::unique_ptr<request> req) {
gms::inet_address ep(utils::fb_utilities::get_broadcast_address());
return gms::get_local_gossiper().get_current_generation_number(ep).then([](int res) {
return gms::get_current_generation_number(ep).then([](int res) {
return make_ready_future<json::json_return_type>(res);
});
});
ss::get_natural_endpoints.set(r, [&ctx](const_req req) {
auto keyspace = validate_keyspace(ctx, req.param);
return container_to_vec(service::get_local_storage_service().get_natural_endpoints(keyspace, req.get_query_param("cf"),
req.get_query_param("key")));
ss::get_natural_endpoints.set(r, [&ctx](std::unique_ptr<request> req) {
//TBD
unimplemented();
auto keyspace = validate_keyspace(ctx, req->param);
auto column_family = req->get_query_param("cf");
auto key = req->get_query_param("key");
std::vector<sstring> res;
return make_ready_future<json::json_return_type>(res);
});
ss::get_snapshot_details.set(r, [](std::unique_ptr<request> req) {
return service::get_local_storage_service().get_snapshot_details().then([] (auto result) {
std::vector<ss::snapshots> res;
for (auto& map: result) {
ss::snapshots all_snapshots;
all_snapshots.key = map.first;
std::vector<ss::snapshot> snapshot;
for (auto& cf: map.second) {
ss::snapshot s;
s.ks = cf.ks;
s.cf = cf.cf;
s.live = cf.live;
s.total = cf.total;
snapshot.push_back(std::move(s));
}
all_snapshots.value = std::move(snapshot);
res.push_back(std::move(all_snapshots));
}
return make_ready_future<json::json_return_type>(std::move(res));
});
//TBD
unimplemented();
std::vector<ss::snapshots> res;
return make_ready_future<json::json_return_type>(res);
});
ss::take_snapshot.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
auto tag = req->get_query_param("tag");
auto keyname = req->get_query_param("kn");
auto column_family = req->get_query_param("cf");
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
auto resp = make_ready_future<>();
if (column_family.empty()) {
resp = service::get_local_storage_service().take_snapshot(tag, keynames);
} else {
if (keynames.size() > 1) {
throw httpd::bad_param_exception("Only one keyspace allowed when specifying a column family");
}
resp = service::get_local_storage_service().take_column_family_snapshot(keynames[0], column_family, tag);
}
return resp.then([] {
return make_ready_future<json::json_return_type>(json_void());
});
return make_ready_future<json::json_return_type>(json_void());
});
ss::del_snapshot.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
auto tag = req->get_query_param("tag");
std::vector<sstring> keynames = split(req->get_query_param("kn"), ",");
return service::get_local_storage_service().clear_snapshot(tag, keynames).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
auto keyname = req->get_query_param("kn");
return make_ready_future<json::json_return_type>(json_void());
});
ss::true_snapshots_size.set(r, [](std::unique_ptr<request> req) {
return service::get_local_storage_service().true_snapshots_size().then([] (int64_t size) {
return make_ready_future<json::json_return_type>(size);
});
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
ss::force_keyspace_compaction.set(r, [&ctx](std::unique_ptr<request> req) {
@@ -272,14 +239,10 @@ void set_storage_service(http_context& ctx, routes& r) {
ss::force_keyspace_cleanup.set(r, [&ctx](std::unique_ptr<request> req) {
//TBD
// FIXME
// the nodetool clean up is used in many tests
// this workaround willl let it work until
// a cleanup is implemented
warn(unimplemented::cause::API);
unimplemented();
auto keyspace = validate_keyspace(ctx, req->param);
auto column_family = req->get_query_param("cf");
return make_ready_future<json::json_return_type>(0);
return make_ready_future<json::json_return_type>(json_void());
});
ss::scrub.set(r, [&ctx](std::unique_ptr<request> req) {
@@ -318,14 +281,18 @@ void set_storage_service(http_context& ctx, routes& r) {
ss::repair_async.set(r, [&ctx](std::unique_ptr<request> req) {
static std::vector<sstring> options = {"primaryRange", "parallelism", "incremental",
"jobThreads", "ranges", "columnFamilies", "dataCenters", "hosts", "trace"};
// Currently, we get all the repair options encoded in a single
// "options" option, and split it to a map using the "," and ":"
// delimiters. TODO: consider if it doesn't make more sense to just
// take all the query parameters as this map and pass it to the repair
// function.
std::unordered_map<sstring, sstring> options_map;
for (auto o : options) {
auto s = req->get_query_param(o);
if (s != "") {
options_map[o] = s;
for (auto s : split(req->get_query_param("options"), ",")) {
auto kv = split(s, ":");
if (kv.size() != 2) {
throw httpd::bad_param_exception("malformed async repair options");
}
options_map.emplace(std::move(kv[0]), std::move(kv[1]));
}
// The repair process is asynchronous: repair_start only starts it and
@@ -363,16 +330,17 @@ void set_storage_service(http_context& ctx, routes& r) {
});
});
ss::move.set(r, [] (std::unique_ptr<request> req) {
ss::move.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
auto new_token = req->get_query_param("new_token");
return service::get_local_storage_service().move(new_token).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
return make_ready_future<json::json_return_type>(json_void());
});
ss::remove_node.set(r, [](std::unique_ptr<request> req) {
auto host_id = req->get_query_param("host_id");
return service::get_local_storage_service().remove_node(host_id).then([] {
// FIXME: This api is incorrect. remove_node takes a host id string parameter instead of token.
auto host_id = req->get_query_param("token");
return service::get_local_storage_service().remove_node(std::move(host_id)).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
});
@@ -417,18 +385,15 @@ void set_storage_service(http_context& ctx, routes& r) {
});
ss::get_drain_progress.set(r, [](std::unique_ptr<request> req) {
return service::get_storage_service().map_reduce(adder<service::storage_service::drain_progress>(), [] (auto& ss) {
return ss.get_drain_progress();
}).then([] (auto&& progress) {
auto progress_str = sprint("Drained %s/%s ColumnFamilies", progress.remaining_cfs, progress.total_cfs);
return make_ready_future<json::json_return_type>(std::move(progress_str));
});
//TBD
unimplemented();
return make_ready_future<json::json_return_type>("");
});
ss::drain.set(r, [](std::unique_ptr<request> req) {
return service::get_local_storage_service().drain().then([] {
return make_ready_future<json::json_return_type>(json_void());
});
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(json_void());
});
ss::truncate.set(r, [&ctx](std::unique_ptr<request> req) {
//TBD
@@ -475,10 +440,8 @@ void set_storage_service(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
ss::is_initialized.set(r, [](std::unique_ptr<request> req) {
return service::get_local_storage_service().is_initialized().then([] (bool initialized) {
return make_ready_future<json::json_return_type>(initialized);
});
ss::is_initialized.set(r, [](const_req req) {
return service::get_local_storage_service().is_initialized();
});
ss::stop_rpc_server.set(r, [](std::unique_ptr<request> req) {
@@ -493,10 +456,8 @@ void set_storage_service(http_context& ctx, routes& r) {
});
});
ss::is_rpc_server_running.set(r, [] (std::unique_ptr<request> req) {
return service::get_local_storage_service().is_rpc_server_running().then([] (bool running) {
return make_ready_future<json::json_return_type>(running);
});
ss::is_rpc_server_running.set(r, [](const_req req) {
return service::get_local_storage_service().is_rpc_server_running();
});
ss::start_native_transport.set(r, [](std::unique_ptr<request> req) {
@@ -511,10 +472,8 @@ void set_storage_service(http_context& ctx, routes& r) {
});
});
ss::is_native_transport_running.set(r, [] (std::unique_ptr<request> req) {
return service::get_local_storage_service().is_native_transport_running().then([] (bool running) {
return make_ready_future<json::json_return_type>(running);
});
ss::is_native_transport_running.set(r, [](const_req req) {
return service::get_local_storage_service().is_native_transport_running();
});
ss::join_ring.set(r, [](std::unique_ptr<request> req) {
@@ -523,10 +482,8 @@ void set_storage_service(http_context& ctx, routes& r) {
});
});
ss::is_joined.set(r, [] (std::unique_ptr<request> req) {
return service::get_local_storage_service().is_joined().then([] (bool is_joined) {
return make_ready_future<json::json_return_type>(is_joined);
});
ss::is_joined.set(r, [](const_req req) {
return service::get_local_storage_service().is_joined();
});
ss::set_stream_throughput_mb_per_sec.set(r, [](std::unique_ptr<request> req) {
@@ -542,9 +499,10 @@ void set_storage_service(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(0);
});
ss::get_compaction_throughput_mb_per_sec.set(r, [&ctx](std::unique_ptr<request> req) {
int value = ctx.db.local().get_config().compaction_throughput_mb_per_sec();
return make_ready_future<json::json_return_type>(value);
ss::get_compaction_throughput_mb_per_sec.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(0);
});
ss::set_compaction_throughput_mb_per_sec.set(r, [](std::unique_ptr<request> req) {
@@ -617,16 +575,11 @@ void set_storage_service(http_context& ctx, routes& r) {
});
ss::load_new_ss_tables.set(r, [&ctx](std::unique_ptr<request> req) {
auto ks = validate_keyspace(ctx, req->param);
auto cf = req->get_query_param("cf");
// No need to add the keyspace, since all we want is to avoid always sending this to the same
// CPU. Even then I am being overzealous here. This is not something that happens all the time.
auto coordinator = std::hash<sstring>()(cf) % smp::count;
return service::get_storage_service().invoke_on(coordinator, [ks = std::move(ks), cf = std::move(cf)] (service::storage_service& s) {
return s.load_new_sstables(ks, cf);
}).then([] {
return make_ready_future<json::json_return_type>(json_void());
});
//TBD
unimplemented();
auto keyspace = validate_keyspace(ctx, req->param);
auto column_family = req->get_query_param("cf");
return make_ready_future<json::json_return_type>(json_void());
});
ss::sample_key_range.set(r, [](std::unique_ptr<request> req) {
@@ -678,12 +631,16 @@ void set_storage_service(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(json_void());
});
ss::get_cluster_name.set(r, [](const_req req) {
return gms::get_local_gossiper().get_cluster_name();
ss::get_cluster_name.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(json_void());
});
ss::get_partitioner_name.set(r, [](const_req req) {
return gms::get_local_gossiper().get_partitioner_name();
ss::get_partitioner_name.set(r, [](std::unique_ptr<request> req) {
//TBD
unimplemented();
return make_ready_future<json::json_return_type>(json_void());
});
ss::get_tombstone_warn_threshold.set(r, [](std::unique_ptr<request> req) {
@@ -754,19 +711,17 @@ void set_storage_service(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(0);
});
ss::get_ownership.set(r, [] (std::unique_ptr<request> req) {
return service::get_local_storage_service().get_ownership().then([] (auto&& ownership) {
std::vector<storage_service_json::mapper> res;
return make_ready_future<json::json_return_type>(map_to_key_value(ownership, res));
});
ss::get_ownership.set(r, [](const_req req) {
auto tokens = service::get_local_storage_service().get_ownership();
std::vector<storage_service_json::mapper> res;
return map_to_key_value(tokens, res);
});
ss::get_effective_ownership.set(r, [&ctx] (std::unique_ptr<request> req) {
auto keyspace_name = req->param["keyspace"] == "null" ? "" : validate_keyspace(ctx, req->param);
return service::get_local_storage_service().effective_ownership(keyspace_name).then([] (auto&& ownership) {
std::vector<storage_service_json::mapper> res;
return make_ready_future<json::json_return_type>(map_to_key_value(ownership, res));
});
ss::get_effective_ownership.set(r, [&ctx](const_req req) {
auto tokens = service::get_local_storage_service().effective_ownership(
(req.param["keyspace"] == "null")? "" : validate_keyspace(ctx, req.param));
std::vector<storage_service_json::mapper> res;
return map_to_key_value(tokens, res);
});
}

View File

@@ -24,7 +24,6 @@
#include "streaming/stream_result_future.hh"
#include "api/api-doc/stream_manager.json.hh"
#include <vector>
#include "gms/gossiper.hh"
namespace api {
@@ -72,12 +71,11 @@ static hs::stream_state get_state(
si.peer = boost::lexical_cast<std::string>(info.peer);
si.session_index = info.session_index;
si.state = info.state;
si.connecting = si.peer;
si.connecting = boost::lexical_cast<std::string>(info.connecting);
set_summaries(info.receiving_summaries, si.receiving_summaries);
set_summaries(info.sending_summaries, si.sending_summaries);
set_files(info.receiving_files, si.receiving_files);
set_files(info.sending_files, si.sending_files);
state.sessions.push(si);
}
return state;
}
@@ -99,80 +97,6 @@ void set_stream_manager(http_context& ctx, routes& r) {
return make_ready_future<json::json_return_type>(res);
});
});
hs::get_all_active_streams_outbound.set(r, [](std::unique_ptr<request> req) {
return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
return stream.get_initiated_streams().size();
}, 0, std::plus<int64_t>()).then([](int64_t res) {
return make_ready_future<json::json_return_type>(res);
});
});
hs::get_total_incoming_bytes.set(r, [](std::unique_ptr<request> req) {
gms::inet_address ep(req->param["peer"]);
utils::UUID plan_id = gms::get_local_gossiper().get_host_id(ep);
return streaming::get_stream_manager().map_reduce0([plan_id](streaming::stream_manager& stream) {
int64_t res = 0;
streaming::stream_result_future* s = stream.get_receiving_stream(plan_id).get();
if (s != nullptr) {
for (auto si: s->get_coordinator()->get_all_session_info()) {
res += si.get_total_size_received();
}
}
return res;
}, 0, std::plus<int64_t>()).then([](int64_t res) {
return make_ready_future<json::json_return_type>(res);
});
});
hs::get_all_total_incoming_bytes.set(r, [](std::unique_ptr<request> req) {
return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
int64_t res = 0;
for (auto s : stream.get_receiving_streams()) {
if (s.second.get() != nullptr) {
for (auto si: s.second.get()->get_coordinator()->get_all_session_info()) {
res += si.get_total_size_received();
}
}
}
return res;
}, 0, std::plus<int64_t>()).then([](int64_t res) {
return make_ready_future<json::json_return_type>(res);
});
});
hs::get_total_outgoing_bytes.set(r, [](std::unique_ptr<request> req) {
gms::inet_address ep(req->param["peer"]);
utils::UUID plan_id = gms::get_local_gossiper().get_host_id(ep);
return streaming::get_stream_manager().map_reduce0([plan_id](streaming::stream_manager& stream) {
int64_t res = 0;
streaming::stream_result_future* s = stream.get_sending_stream(plan_id).get();
if (s != nullptr) {
for (auto si: s->get_coordinator()->get_all_session_info()) {
res += si.get_total_size_received();
}
}
return res;
}, 0, std::plus<int64_t>()).then([](int64_t res) {
return make_ready_future<json::json_return_type>(res);
});
});
hs::get_all_total_outgoing_bytes.set(r, [](std::unique_ptr<request> req) {
return streaming::get_stream_manager().map_reduce0([](streaming::stream_manager& stream) {
int64_t res = 0;
for (auto s : stream.get_initiated_streams()) {
if (s.second.get() != nullptr) {
for (auto si: s.second.get()->get_coordinator()->get_all_session_info()) {
res += si.get_total_size_received();
}
}
}
return res;
}, 0, std::plus<int64_t>()).then([](int64_t res) {
return make_ready_future<json::json_return_type>(res);
});
});
}
}

View File

@@ -1,70 +0,0 @@
/*
* Copyright 2015 Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "api/api-doc/system.json.hh"
#include "api/api.hh"
#include "http/exception.hh"
#include "log.hh"
namespace api {
namespace hs = httpd::system_json;
// Register the /system REST endpoints that expose the logger registry:
// listing logger names and reading/changing log levels.
void set_system(http_context& ctx, routes& r) {
    hs::get_all_logger_names.set(r, [](const_req req) {
        // Synchronous handler: the registry lookup is purely in-memory.
        return logging::logger_registry().get_all_logger_names();
    });

    hs::set_all_logger_level.set(r, [](const_req req) {
        auto level_param = req.get_query_param("level");
        try {
            auto lvl = boost::lexical_cast<logging::log_level>(std::string(level_param));
            logging::logger_registry().set_all_loggers_level(lvl);
        } catch (const boost::bad_lexical_cast&) {
            // The query parameter did not name a known log level.
            throw bad_param_exception("Unknown logging level " + level_param);
        }
        return json::json_void();
    });

    hs::get_logger_level.set(r, [](const_req req) {
        try {
            return logging::level_name(logging::logger_registry().get_logger_level(req.param["name"]));
        } catch (const std::out_of_range&) {
            // No logger is registered under that name.
            throw bad_param_exception("Unknown logger name " + req.param["name"]);
        }
        // Unreachable: the try block either returns or throws. Kept so the
        // lambda's deduced return type stays consistent.
        return sstring();
    });

    hs::set_logger_level.set(r, [](const_req req) {
        auto level_param = req.get_query_param("level");
        try {
            auto lvl = boost::lexical_cast<logging::log_level>(std::string(level_param));
            logging::logger_registry().set_logger_level(req.param["name"], lvl);
        } catch (const std::out_of_range&) {
            throw bad_param_exception("Unknown logger name " + req.param["name"]);
        } catch (const boost::bad_lexical_cast&) {
            throw bad_param_exception("Unknown logging level " + level_param);
        }
        return json::json_void();
    });
}
}

View File

@@ -1,30 +0,0 @@
/*
* Copyright 2015 Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "api.hh"
namespace api {

// Registers the /system REST endpoints (logger names, logger levels) on the
// given routes object. Defined in the corresponding .cc file.
void set_system(http_context& ctx, routes& r);

}

View File

@@ -234,8 +234,6 @@ public:
friend std::ostream& operator<<(std::ostream& os, const atomic_cell& ac);
};
class collection_mutation_view;
// Represents a mutation of a collection. Actual format is determined by collection type,
// and is:
// set: list of atomic_cell
@@ -243,35 +241,58 @@ class collection_mutation_view;
// list: tbd, probably ugly
class collection_mutation {
public:
managed_bytes data;
collection_mutation() {}
collection_mutation(managed_bytes b) : data(std::move(b)) {}
collection_mutation(collection_mutation_view v);
operator collection_mutation_view() const;
struct view {
bytes_view data;
bytes_view serialize() const { return data; }
static view from_bytes(bytes_view v) { return { v }; }
};
struct one {
managed_bytes data;
one() {}
one(managed_bytes b) : data(std::move(b)) {}
one(view v) : data(v.data) {}
operator view() const { return { data }; }
};
};
// Non-owning view over a serialized collection mutation. The underlying
// bytes are owned elsewhere (e.g. by a collection_mutation); the view must
// not outlive that owner.
class collection_mutation_view {
public:
bytes_view data;
// The view already holds the serialized form, so serialization is a no-op.
bytes_view serialize() const { return data; }
static collection_mutation_view from_bytes(bytes_view v) { return { v }; }
};
// Materialize an owning collection_mutation from a non-owning view; the
// viewed bytes are copied into managed storage.
inline
collection_mutation::collection_mutation(collection_mutation_view v)
: data(v.data) {
}
// Cheap conversion back to a non-owning view over this mutation's bytes.
inline
collection_mutation::operator collection_mutation_view() const {
return { data };
}
namespace db {
template<typename T>
class serializer;
}
// A variant type that can hold either an atomic_cell, or a serialized collection.
// Which type is stored is determined by the schema.
// Untagged variant: holds either an atomic_cell or a serialized collection
// mutation as raw managed_bytes. Which interpretation is valid is decided
// by the schema; the object itself carries no discriminator.
class atomic_cell_or_collection final {
managed_bytes _data;
template<typename T>
friend class db::serializer;
private:
// Takes ownership of already-serialized bytes; private so callers go
// through the typed constructors/factories below.
atomic_cell_or_collection(managed_bytes&& data) : _data(std::move(data)) {}
public:
atomic_cell_or_collection() = default;
atomic_cell_or_collection(atomic_cell ac) : _data(std::move(ac._data)) {}
static atomic_cell_or_collection from_atomic_cell(atomic_cell data) { return { std::move(data._data) }; }
// Caller must know from the schema that this holds an atomic cell.
atomic_cell_view as_atomic_cell() const { return atomic_cell_view::from_bytes(_data); }
atomic_cell_or_collection(collection_mutation::one cm) : _data(std::move(cm.data)) {}
// False only for a default-constructed (empty) value.
explicit operator bool() const {
return !_data.empty();
}
static atomic_cell_or_collection from_collection_mutation(collection_mutation::one data) {
return std::move(data.data);
}
// Caller must know from the schema that this holds a collection mutation.
collection_mutation::view as_collection_mutation() const {
return collection_mutation::view{_data};
}
// The stored bytes are already the serialized representation.
bytes_view serialize() const {
return _data;
}
bool operator==(const atomic_cell_or_collection& other) const {
return _data == other._data;
}
friend std::ostream& operator<<(std::ostream&, const atomic_cell_or_collection&);
};
class column_definition;
int compare_atomic_cell_for_merge(atomic_cell_view left, atomic_cell_view right);

View File

@@ -1,57 +0,0 @@
/*
* Copyright (C) 2015 Cloudius Systems, Ltd.
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
// Not part of atomic_cell.hh to avoid cyclic dependency between types.hh and atomic_cell.hh
#include "types.hh"
#include "atomic_cell.hh"
#include "hashing.hh"
// Feed a serialized collection mutation into a hasher: deserialize it via
// its collection type, then hash the tombstone followed by every
// (key, cell) pair in order.
template<typename Hasher>
void feed_hash(collection_mutation_view cell, Hasher& h, const data_type& type) {
auto&& ctype = static_pointer_cast<const collection_type_impl>(type);
auto m_view = ctype->deserialize_mutation_form(cell);
::feed_hash(h, m_view.tomb);
for (auto&& key_and_value : m_view.cells) {
::feed_hash(h, key_and_value.first);
::feed_hash(h, key_and_value.second);
}
}
// Hash specialization for atomic cells. Liveness and timestamp are always
// hashed; a live cell then contributes its value (plus expiry/ttl when it
// has a TTL), while a dead cell contributes its deletion time instead.
template<>
struct appending_hash<atomic_cell_view> {
template<typename Hasher>
void operator()(Hasher& h, atomic_cell_view cell) const {
feed_hash(h, cell.is_live());
feed_hash(h, cell.timestamp());
if (cell.is_live()) {
if (cell.is_live_and_has_ttl()) {
feed_hash(h, cell.expiry());
feed_hash(h, cell.ttl());
}
feed_hash(h, cell.value());
} else {
feed_hash(h, cell.deletion_time());
}
}
};

View File

@@ -1,73 +0,0 @@
/*
* Copyright (C) 2015 Cloudius Systems, Ltd.
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "atomic_cell.hh"
#include "schema.hh"
#include "hashing.hh"
// A variant type that can hold either an atomic_cell, or a serialized collection.
// Which type is stored is determined by the schema.
// Untagged variant: holds either an atomic_cell or a serialized collection
// mutation as raw managed_bytes; the schema (column definition) determines
// which interpretation is valid.
class atomic_cell_or_collection final {
managed_bytes _data;
template<typename T>
friend class db::serializer;
private:
// Takes ownership of already-serialized bytes; private so callers go
// through the typed constructors/factories below.
atomic_cell_or_collection(managed_bytes&& data) : _data(std::move(data)) {}
public:
atomic_cell_or_collection() = default;
atomic_cell_or_collection(atomic_cell ac) : _data(std::move(ac._data)) {}
static atomic_cell_or_collection from_atomic_cell(atomic_cell data) { return { std::move(data._data) }; }
// Caller must know from the schema that this holds an atomic cell.
atomic_cell_view as_atomic_cell() const { return atomic_cell_view::from_bytes(_data); }
atomic_cell_or_collection(collection_mutation cm) : _data(std::move(cm.data)) {}
// False only for a default-constructed (empty) value.
explicit operator bool() const {
return !_data.empty();
}
static atomic_cell_or_collection from_collection_mutation(collection_mutation data) {
return std::move(data.data);
}
// Caller must know from the schema that this holds a collection mutation.
collection_mutation_view as_collection_mutation() const {
return collection_mutation_view{_data};
}
// The stored bytes are already the serialized representation.
bytes_view serialize() const {
return _data;
}
bool operator==(const atomic_cell_or_collection& other) const {
return _data == other._data;
}
// Hash this value under the given column definition: atomic columns hash
// the cell directly, non-atomic columns hash the collection mutation.
template<typename Hasher>
void feed_hash(Hasher& h, const column_definition& def) const {
if (def.is_atomic()) {
::feed_hash(h, as_atomic_cell());
} else {
::feed_hash(as_collection_mutation(), h, def.type);
}
}
// Make the underlying managed_bytes contiguous in memory.
void linearize() {
_data.linearize();
}
// NOTE(review): scatter() presumably undoes linearize(), allowing the
// storage to fragment again — confirm against managed_bytes.
void unlinearize() {
_data.scatter();
}
friend std::ostream& operator<<(std::ostream&, const atomic_cell_or_collection&);
};

View File

@@ -1,236 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include <seastar/core/sleep.hh>
#include "auth.hh"
#include "authenticator.hh"
#include "database.hh"
#include "cql3/query_processor.hh"
#include "cql3/statements/cf_statement.hh"
#include "cql3/statements/create_table_statement.hh"
#include "db/config.hh"
#include "service/migration_manager.hh"
// Name of the default superuser created on a fresh cluster.
const sstring auth::auth::DEFAULT_SUPERUSER_NAME("cassandra");
// Keyspace holding all authentication tables.
const sstring auth::auth::AUTH_KS("system_auth");
// Table mapping user name -> superuser flag.
const sstring auth::auth::USERS_CF("users");
// Column names in USERS_CF.
static const sstring USER_NAME("name");
static const sstring SUPER("super");
static logging::logger logger("auth");
// TODO: configurable
using namespace std::chrono_literals;
// Delay after startup before attempting to create the default superuser,
// giving the rest of the cluster time to come up.
const std::chrono::milliseconds auth::auth::SUPERUSER_SETUP_DELAY = 10000ms;
// Schema-change listener used to keep authorization state in sync with
// schema changes. Only the drop callbacks are meant to do work (revoking
// permissions on dropped keyspaces/tables), and those are still TODO.
class auth_migration_listener : public service::migration_listener {
    void on_create_keyspace(const sstring& ks_name) override {}
    void on_create_column_family(const sstring& ks_name, const sstring& cf_name) override {}
    void on_create_user_type(const sstring& ks_name, const sstring& type_name) override {}
    void on_create_function(const sstring& ks_name, const sstring& function_name) override {}
    void on_create_aggregate(const sstring& ks_name, const sstring& aggregate_name) override {}
    void on_update_keyspace(const sstring& ks_name) override {}
    void on_update_column_family(const sstring& ks_name, const sstring& cf_name, bool) override {}
    void on_update_user_type(const sstring& ks_name, const sstring& type_name) override {}
    void on_update_function(const sstring& ks_name, const sstring& function_name) override {}
    void on_update_aggregate(const sstring& ks_name, const sstring& aggregate_name) override {}
    void on_drop_keyspace(const sstring& ks_name) override {
        // TODO:
        //DatabaseDescriptor.getAuthorizer().revokeAll(DataResource.keyspace(ksName));
    }
    void on_drop_column_family(const sstring& ks_name, const sstring& cf_name) override {
        // TODO:
        //DatabaseDescriptor.getAuthorizer().revokeAll(DataResource.columnFamily(ksName, cfName));
    }
    void on_drop_user_type(const sstring& ks_name, const sstring& type_name) override {}
    void on_drop_function(const sstring& ks_name, const sstring& function_name) override {}
    void on_drop_aggregate(const sstring& ks_name, const sstring& aggregate_name) override {}
};
// Single process-wide instance; registered on one shard only (see setup()).
static auth_migration_listener auth_migration;
// Returns true if `type` names the same class as `classname`, either as the
// fully qualified name or as the bare class name after the final '.'.
bool auth::auth::is_class_type(const sstring& type, const sstring& classname) {
    if (classname == type) {
        return true;
    }
    // find_last_of returns npos when there is no '.', in which case npos + 1
    // wraps to 0 and we compare against the whole string again.
    auto dot = classname.find_last_of('.');
    return classname.compare(dot + 1, sstring::npos, type) == 0;
}
// One-shot auth subsystem initialization, run at node startup:
// create the system_auth keyspace and users table if missing, initialize the
// configured authenticator, register the schema listener, and (after a delay)
// create the default superuser if no users exist yet.
future<> auth::auth::setup() {
    auto& db = cql3::get_local_query_processor().db().local();
    auto& cfg = db.get_config();
    auto type = cfg.authenticator();
    // AllowAll needs no tables; just instantiate the authenticator object.
    if (is_class_type(type, authenticator::ALLOW_ALL_AUTHENTICATOR_NAME)) {
        return authenticator::setup(type).discard_result(); // just create the object
    }
    future<> f = make_ready_future();
    if (!db.has_keyspace(AUTH_KS)) {
        std::map<sstring, sstring> opts;
        opts["replication_factor"] = "1";
        auto ksm = keyspace_metadata::new_keyspace(AUTH_KS, "org.apache.cassandra.locator.SimpleStrategy", opts, true);
        f = service::get_local_migration_manager().announce_new_keyspace(ksm, false);
    }
    return f.then([] {
        return setup_table(USERS_CF, sprint("CREATE TABLE %s.%s (%s text, %s boolean, PRIMARY KEY(%s)) WITH gc_grace_seconds=%d",
                        AUTH_KS, USERS_CF, USER_NAME, SUPER, USER_NAME,
                        90 * 24 * 60 * 60)); // 3 months.
    }).then([type] {
        return authenticator::setup(type).discard_result();
    }).then([] {
        // TODO authorizer
    }).then([] {
        service::get_local_migration_manager().register_listener(&auth_migration); // again, only one shard...
        // instead of once-timer, just schedule this later
        // NOTE(review): the future returned by sleep(...).then(...) and the one
        // returned by process(...) below are deliberately dropped
        // (fire-and-forget); failures are only surfaced via the
        // handle_exception below -- confirm this is intended.
        sleep(SUPERUSER_SETUP_DELAY).then([] {
            // setup default super user
            return has_existing_users(USERS_CF, DEFAULT_SUPERUSER_NAME, USER_NAME).then([](bool exists) {
                if (!exists) {
                    // TIMESTAMP 0 lets any later explicit user modification win.
                    auto query = sprint("INSERT INTO %s.%s (%s, %s) VALUES (?, ?) USING TIMESTAMP 0",
                                    AUTH_KS, USERS_CF, USER_NAME, SUPER);
                    cql3::get_local_query_processor().process(query, db::consistency_level::ONE, {DEFAULT_SUPERUSER_NAME, true}).then([](auto) {
                        logger.info("Created default superuser '{}'", DEFAULT_SUPERUSER_NAME);
                    }).handle_exception([](auto ep) {
                        try {
                            std::rethrow_exception(ep);
                        } catch (exceptions::request_execution_exception&) {
                            logger.warn("Skipped default superuser setup: some nodes were not ready");
                        }
                    });
                }
            });
        });
    });
}
// Consistency level used for reads/writes of a user's row: the default
// superuser is accessed at QUORUM, all other users at LOCAL_ONE.
static db::consistency_level consistency_for_user(const sstring& username) {
    return username == auth::auth::DEFAULT_SUPERUSER_NAME
            ? db::consistency_level::QUORUM
            : db::consistency_level::LOCAL_ONE;
}
// Fetches the row for `username` from AUTH_KS.USERS_CF, at a consistency
// level that depends on the user (see consistency_for_user()).
static future<::shared_ptr<cql3::untyped_result_set>> select_user(const sstring& username) {
    // Here was a thread local, explicit cache of prepared statement. In normal execution this is
    // fine, but since we in testing set up and tear down system over and over, we'd start using
    // obsolete prepared statements pretty quickly.
    // Rely on query processing caching statements instead, and lets assume
    // that a map lookup string->statement is not gonna kill us much.
    return cql3::get_local_query_processor().process(
                    sprint("SELECT * FROM %s.%s WHERE %s = ?",
                                    auth::auth::AUTH_KS, auth::auth::USERS_CF,
                                    USER_NAME), consistency_for_user(username),
                    { username }, true);
}
// Checks whether AUTH_KS.USERS_CF holds a row for this username.
future<bool> auth::auth::is_existing_user(const sstring& username) {
    return select_user(username).then([](::shared_ptr<cql3::untyped_result_set> rows) {
        bool found = !rows->empty();
        return make_ready_future<bool>(found);
    });
}
// True iff the user's row exists and its "super" column is true;
// false for unknown users.
future<bool> auth::auth::is_super_user(const sstring& username) {
    return select_user(username).then([](::shared_ptr<cql3::untyped_result_set> rows) {
        if (rows->empty()) {
            return make_ready_future<bool>(false);
        }
        return make_ready_future<bool>(rows->one().get_as<bool>(SUPER));
    });
}
// Inserts (or overwrites) the user's row in AUTH_KS.USERS_CF; used by
// CREATE USER and ALTER USER.
// NOTE(review): dynamic exception specifications are deprecated in C++11
// and removed in C++17; must stay in sync with the declaration in auth.hh.
future<> auth::auth::insert_user(const sstring& username, bool is_super)
                throw (exceptions::request_execution_exception) {
    return cql3::get_local_query_processor().process(sprint("INSERT INTO %s.%s (%s, %s) VALUES (?, ?)",
                    AUTH_KS, USERS_CF, USER_NAME, SUPER),
                    consistency_for_user(username), { username, is_super }).discard_result();
}
// Removes the user's row from AUTH_KS.USERS_CF; used by DROP USER.
future<> auth::auth::delete_user(const sstring& username) throw(exceptions::request_execution_exception) {
    return cql3::get_local_query_processor().process(sprint("DELETE FROM %s.%s WHERE %s = ?",
                    AUTH_KS, USERS_CF, USER_NAME),
                    consistency_for_user(username), { username }).discard_result();
}
// Creates table `name` in AUTH_KS from the given CREATE TABLE statement,
// unless the schema already contains it. The statement text is parsed and
// prepared the same way user-issued CQL would be.
future<> auth::auth::setup_table(const sstring& name, const sstring& cql) {
    auto& qp = cql3::get_local_query_processor();
    auto& db = qp.db().local();
    if (db.has_schema(AUTH_KS, name)) {
        return make_ready_future();
    }
    ::shared_ptr<cql3::statements::cf_statement> parsed = static_pointer_cast<
                    cql3::statements::cf_statement>(cql3::query_processor::parse_statement(cql));
    parsed->prepare_keyspace(AUTH_KS);
    ::shared_ptr<cql3::statements::create_table_statement> statement =
                    static_pointer_cast<cql3::statements::create_table_statement>(
                                    parsed->prepare(db)->statement);
    // Origin sets "Legacy Cf Id" for the new table. We have no need to be
    // pre-2.1 compatible (afaik), so lets skip a whole lotta hoolaballo
    // The lambda captures `statement` only to keep it alive until the
    // migration announcement resolves.
    return statement->announce_migration(qp.proxy(), false).then([statement](bool) {});
}
// Answers "has any user ever been created?" with increasing thoroughness:
// 1. look for the default user at CL.ONE (cheap, usually sufficient);
// 2. retry the same lookup at CL.QUORUM in case replicas disagree;
// 3. finally look for any user row at all at CL.QUORUM.
future<bool> auth::auth::has_existing_users(const sstring& cfname, const sstring& def_user_name, const sstring& name_column) {
    auto default_user_query = sprint("SELECT * FROM %s.%s WHERE %s = ?", AUTH_KS, cfname, name_column);
    auto all_users_query = sprint("SELECT * FROM %s.%s LIMIT 1", AUTH_KS, cfname);
    return cql3::get_local_query_processor().process(default_user_query, db::consistency_level::ONE, { def_user_name }).then([=](::shared_ptr<cql3::untyped_result_set> res) {
        if (!res->empty()) {
            return make_ready_future<bool>(true);
        }
        return cql3::get_local_query_processor().process(default_user_query, db::consistency_level::QUORUM, { def_user_name }).then([all_users_query](::shared_ptr<cql3::untyped_result_set> res) {
            if (!res->empty()) {
                return make_ready_future<bool>(true);
            }
            return cql3::get_local_query_processor().process(all_users_query, db::consistency_level::QUORUM).then([](::shared_ptr<cql3::untyped_result_set> res) {
                return make_ready_future<bool>(!res->empty());
            });
        });
    });
}

View File

@@ -1,116 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <chrono>
#include <seastar/core/sstring.hh>
#include <seastar/core/future.hh>
#include "exceptions/exceptions.hh"
namespace auth {

// Static entry points for the authentication subsystem: owns the
// system_auth keyspace/users table and the lifecycle of the configured
// authenticator. All members are static; this class is never instantiated.
class auth {
public:
    // Name of the default superuser ("cassandra").
    static const sstring DEFAULT_SUPERUSER_NAME;
    // Keyspace holding authentication tables.
    static const sstring AUTH_KS;
    // Table mapping user name -> superuser flag.
    static const sstring USERS_CF;
    // Startup delay before attempting default-superuser creation.
    static const std::chrono::milliseconds SUPERUSER_SETUP_DELAY;

    // True if `type` matches `classname` either fully qualified or as the
    // bare class name after the last '.'.
    static bool is_class_type(const sstring& type, const sstring& classname);

#if 0
    public static Set<Permission> getPermissions(AuthenticatedUser user, IResource resource)
    {
        return permissionsCache.getPermissions(user, resource);
    }
#endif

    /**
     * Checks if the username is stored in AUTH_KS.USERS_CF.
     *
     * @param username Username to query.
     * @return whether or not Cassandra knows about the user.
     */
    static future<bool> is_existing_user(const sstring& username);

    /**
     * Checks if the user is a known superuser.
     *
     * @param username Username to query.
     * @return true is the user is a superuser, false if they aren't or don't exist at all.
     */
    static future<bool> is_super_user(const sstring& username);

    /**
     * Inserts the user into AUTH_KS.USERS_CF (or overwrites their superuser status as a result of an ALTER USER query).
     *
     * @param username Username to insert.
     * @param isSuper User's new status.
     * @throws RequestExecutionException
     */
    // NOTE(review): dynamic exception specs are deprecated (removed in C++17).
    static future<> insert_user(const sstring& username, bool is_super) throw(exceptions::request_execution_exception);

    /**
     * Deletes the user from AUTH_KS.USERS_CF.
     *
     * @param username Username to delete.
     * @throws RequestExecutionException
     */
    static future<> delete_user(const sstring& username) throw(exceptions::request_execution_exception);

    /**
     * Sets up Authenticator and Authorizer.
     */
    static future<> setup();

    /**
     * Set up table from given CREATE TABLE statement under system_auth keyspace, if not already done so.
     *
     * @param name name of the table
     * @param cql CREATE TABLE statement
     */
    static future<> setup_table(const sstring& name, const sstring& cql);

    // True if any user row exists; tries the default user first (see .cc).
    static future<bool> has_existing_users(const sstring& cfname, const sstring& def_user_name, const sstring& name_column_name);
};
}

View File

@@ -1,61 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "authenticated_user.hh"

#include <utility>
// Name reported by name() for the anonymous (unauthenticated) user.
const sstring auth::authenticated_user::ANONYMOUS_USERNAME("anonymous");
// Constructs the anonymous user (name() will return ANONYMOUS_USERNAME).
auth::authenticated_user::authenticated_user()
        : _anon(true)
{}
// Constructs a named (non-anonymous) user.
// `name` is a sink parameter taken by value; move it into place instead of
// copying it a second time.
auth::authenticated_user::authenticated_user(sstring name)
        : _name(std::move(name)), _anon(false)
{}
// Returns ANONYMOUS_USERNAME for the anonymous user, the stored name otherwise.
const sstring& auth::authenticated_user::name() const {
    if (_anon) {
        return ANONYMOUS_USERNAME;
    }
    return _name;
}
// Anonymous users compare against the other side's anonymity flag;
// named users compare by name (matching the original semantics exactly).
bool auth::authenticated_user::operator==(const authenticated_user& other) const {
    if (_anon) {
        return other._anon;
    }
    return _name == other._name;
}

View File

@@ -1,79 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <seastar/core/sstring.hh>
namespace auth {

// Identity of the client on whose behalf CQL statements execute: either a
// named user or the anonymous user (when the configured authenticator does
// not require authentication).
class authenticated_user {
public:
    static const sstring ANONYMOUS_USERNAME;

    // Constructs the anonymous user.
    authenticated_user();
    // Constructs a named user.
    // NOTE(review): intentionally implicit? Consider `explicit` otherwise.
    authenticated_user(sstring name);

    // The user's name; ANONYMOUS_USERNAME for the anonymous user.
    const sstring& name() const;

    /**
     * Checks the user's superuser status.
     * Only a superuser is allowed to perform CREATE USER and DROP USER queries.
     * In most cases, though not necessarily, a superuser will have Permission.ALL on every resource
     * (depends on IAuthorizer implementation).
     */
    bool is_super() const;

    /**
     * If IAuthenticator doesn't require authentication, this method may return true.
     */
    bool is_anonymous() const {
        return _anon;
    }

    bool operator==(const authenticated_user&) const;
private:
    sstring _name;  // empty for the anonymous user
    bool _anon;     // true iff this is the anonymous user
};
}

View File

@@ -1,110 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "authenticator.hh"
#include "authenticated_user.hh"
#include "password_authenticator.hh"
#include "auth.hh"
#include "db/config.hh"
// Keys used in the credentials map passed to authenticate().
const sstring auth::authenticator::USERNAME_KEY("username");
const sstring auth::authenticator::PASSWORD_KEY("password");
// Fully qualified Cassandra class name, kept for config-file compatibility.
const sstring auth::authenticator::ALLOW_ALL_AUTHENTICATOR_NAME("org.apache.cassandra.auth.AllowAllAuthenticator");

/**
 * Authenticator is assumed to be a fully state-less immutable object (note all the const).
 * We thus store a single instance globally, since it should be safe/ok.
 */
static std::unique_ptr<auth::authenticator> global_authenticator;
// Instantiates the process-wide authenticator selected by `type` (a class
// name matched via auth::is_class_type). AllowAll gets a trivial inline
// implementation; PasswordAuthenticator is asynchronously initialized before
// being installed. Unknown types throw configuration_exception.
future<>
auth::authenticator::setup(const sstring& type) throw (exceptions::configuration_exception) {
    if (auth::auth::is_class_type(type, ALLOW_ALL_AUTHENTICATOR_NAME)) {
        // Trivial authenticator: no authentication required, every operation
        // is a no-op, every caller is the anonymous user.
        class allow_all_authenticator : public authenticator {
        public:
            const sstring& class_name() const override {
                return ALLOW_ALL_AUTHENTICATOR_NAME;
            }
            bool require_authentication() const override {
                return false;
            }
            option_set supported_options() const override {
                return option_set();
            }
            option_set alterable_options() const override {
                return option_set();
            }
            future<::shared_ptr<authenticated_user>> authenticate(const credentials_map& credentials) const throw(exceptions::authentication_exception) override {
                return make_ready_future<::shared_ptr<authenticated_user>>(::make_shared<authenticated_user>());
            }
            future<> create(sstring username, const option_map& options) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) override {
                return make_ready_future();
            }
            future<> alter(sstring username, const option_map& options) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) override {
                return make_ready_future();
            }
            future<> drop(sstring username) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) override {
                return make_ready_future();
            }
            resource_ids protected_resources() const override {
                return resource_ids();
            }
            // Never called: require_authentication() is false.
            ::shared_ptr<sasl_challenge> new_sasl_challenge() const override {
                throw std::runtime_error("Should not reach");
            }
        };
        global_authenticator = std::make_unique<allow_all_authenticator>();
    } else if (auth::auth::is_class_type(type, password_authenticator::PASSWORD_AUTHENTICATOR_NAME)) {
        // init() may do I/O; install the instance only once it succeeds.
        // The continuation takes ownership of `pwa` via the move capture.
        auto pwa = std::make_unique<password_authenticator>();
        auto f = pwa->init();
        return f.then([pwa = std::move(pwa)]() mutable {
            global_authenticator = std::move(pwa);
        });
    } else {
        throw exceptions::configuration_exception("Invalid authenticator type: " + type);
    }
    return make_ready_future();
}
// Returns the process-wide authenticator; setup() must have completed first.
auth::authenticator& auth::authenticator::get() {
    assert(global_authenticator);
    return *global_authenticator;
}

View File

@@ -1,198 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <memory>
#include <unordered_map>
#include <set>
#include <stdexcept>
#include <boost/any.hpp>
#include <seastar/core/sstring.hh>
#include <seastar/core/future.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/core/enum.hh>
#include "bytes.hh"
#include "data_resource.hh"
#include "enum_set.hh"
#include "exceptions/exceptions.hh"
// Forward declaration: db::config is only named, never used, in this header.
namespace db {
class config;
}
namespace auth {

class authenticated_user;

// Abstract interface for pluggable authentication backends (mirrors
// Cassandra's IAuthenticator/ISaslAwareAuthenticator). A single global
// instance is installed via setup() and retrieved via get().
class authenticator {
public:
    static const sstring USERNAME_KEY;
    static const sstring PASSWORD_KEY;
    static const sstring ALLOW_ALL_AUTHENTICATOR_NAME;

    /**
     * Supported CREATE USER/ALTER USER options.
     * Currently only PASSWORD is available.
     */
    enum class option {
        PASSWORD
    };

    using option_set = enum_set<super_enum<option, option::PASSWORD>>;
    // Option values are type-erased (boost::any); PASSWORD carries the
    // password string.
    using option_map = std::unordered_map<option, boost::any, enum_hash<option>>;
    using credentials_map = std::unordered_map<sstring, sstring>;

    /**
     * Resource id mappings, i.e. keyspace and/or column families.
     */
    using resource_ids = std::set<data_resource>;

    /**
     * Setup is called once upon system startup to initialize the IAuthenticator.
     *
     * For example, use this method to create any required keyspaces/column families.
     * Note: Only call from main thread.
     */
    // NOTE(review): dynamic exception specs are deprecated (removed in C++17).
    static future<> setup(const sstring& type) throw(exceptions::configuration_exception);

    /**
     * Returns the system authenticator. Must have called setup before calling this.
     */
    static authenticator& get();

    virtual ~authenticator()
    {}

    virtual const sstring& class_name() const = 0;

    /**
     * Whether or not the authenticator requires explicit login.
     * If false will instantiate user with AuthenticatedUser.ANONYMOUS_USER.
     */
    virtual bool require_authentication() const = 0;

    /**
     * Set of options supported by CREATE USER and ALTER USER queries.
     * Should never return null - always return an empty set instead.
     */
    virtual option_set supported_options() const = 0;

    /**
     * Subset of supportedOptions that users are allowed to alter when performing ALTER USER [themselves].
     * Should never return null - always return an empty set instead.
     */
    virtual option_set alterable_options() const = 0;

    /**
     * Authenticates a user given a Map<String, String> of credentials.
     * Should never return null - always throw AuthenticationException instead.
     * Returning AuthenticatedUser.ANONYMOUS_USER is an option as well if authentication is not required.
     *
     * @throws authentication_exception if credentials don't match any known user.
     */
    virtual future<::shared_ptr<authenticated_user>> authenticate(const credentials_map& credentials) const throw(exceptions::authentication_exception) = 0;

    /**
     * Called during execution of CREATE USER query (also may be called on startup, see seedSuperuserOptions method).
     * If authenticator is static then the body of the method should be left blank, but don't throw an exception.
     * options are guaranteed to be a subset of supportedOptions().
     *
     * @param username Username of the user to create.
     * @param options Options the user will be created with.
     * @throws exceptions::request_validation_exception
     * @throws exceptions::request_execution_exception
     */
    virtual future<> create(sstring username, const option_map& options) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) = 0;

    /**
     * Called during execution of ALTER USER query.
     * options are always guaranteed to be a subset of supportedOptions(). Furthermore, if the user performing the query
     * is not a superuser and is altering himself, then options are guaranteed to be a subset of alterableOptions().
     * Keep the body of the method blank if your implementation doesn't support any options.
     *
     * @param username Username of the user that will be altered.
     * @param options Options to alter.
     * @throws exceptions::request_validation_exception
     * @throws exceptions::request_execution_exception
     */
    virtual future<> alter(sstring username, const option_map& options) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) = 0;

    /**
     * Called during execution of DROP USER query.
     *
     * @param username Username of the user that will be dropped.
     * @throws exceptions::request_validation_exception
     * @throws exceptions::request_execution_exception
     */
    virtual future<> drop(sstring username) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) = 0;

    /**
     * Set of resources that should be made inaccessible to users and only accessible internally.
     *
     * @return Keyspaces, column families that will be unmodifiable by users; other resources.
     * @see resource_ids
     */
    virtual resource_ids protected_resources() const = 0;

    // One in-progress SASL authentication exchange with a client.
    class sasl_challenge {
    public:
        virtual ~sasl_challenge() {}
        virtual bytes evaluate_response(bytes_view client_response) throw(exceptions::authentication_exception) = 0;
        virtual bool is_complete() const = 0;
        virtual future<::shared_ptr<authenticated_user>> get_authenticated_user() const throw(exceptions::authentication_exception) = 0;
    };

    /**
     * Provide a sasl_challenge to be used by the CQL binary protocol server. If
     * the configured authenticator requires authentication but does not implement this
     * interface we refuse to start the binary protocol server as it will have no way
     * of authenticating clients.
     * @return sasl_challenge implementation
     */
    virtual ::shared_ptr<sasl_challenge> new_sasl_challenge() const = 0;
};
}

View File

@@ -1,175 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "data_resource.hh"
#include <regex>
#include "service/storage_proxy.hh"
// Root component of every data resource name ("data", "data/<ks>", "data/<ks>/<cf>").
const sstring auth::data_resource::ROOT_NAME("data");
// Internal constructor: `l` must agree with which of ks/cf are non-empty
// (ROOT: neither, KEYSPACE: ks only, COLUMN_FAMILY: both), otherwise throws.
auth::data_resource::data_resource(level l, const sstring& ks, const sstring& cf)
        : _ks(ks), _cf(cf)
{
    if (l != get_level()) {
        throw std::invalid_argument("level/keyspace/column mismatch");
    }
}
// Root-level resource ("data").
auth::data_resource::data_resource()
        : data_resource(level::ROOT)
{}

// Keyspace-level resource ("data/<ks>").
auth::data_resource::data_resource(const sstring& ks)
        : data_resource(level::KEYSPACE, ks)
{}

// Column-family-level resource ("data/<ks>/<cf>").
auth::data_resource::data_resource(const sstring& ks, const sstring& cf)
        : data_resource(level::COLUMN_FAMILY, ks, cf)
{}
// The stored strings fully determine the level: a column family implies a
// keyspace, a keyspace alone means KEYSPACE, neither means ROOT.
auth::data_resource::level auth::data_resource::get_level() const {
    if (_cf.empty()) {
        return _ks.empty() ? level::ROOT : level::KEYSPACE;
    }
    assert(!_ks.empty());
    return level::COLUMN_FAMILY;
}
// Parses a resource name of the form "data[/<keyspace>[/<column family>]]"
// back into a data_resource; throws std::invalid_argument for anything else
// (more than three components, or a first component other than "data").
auth::data_resource auth::data_resource::from_name(
                const sstring& s) {
    static std::regex slash_regex("/");
    // Token iterators with index -1 yield the substrings *between* '/'s.
    auto i = std::regex_token_iterator<sstring::const_iterator>(s.begin(),
                    s.end(), slash_regex, -1);
    auto e = std::regex_token_iterator<sstring::const_iterator>();
    auto n = std::distance(i, e);

    // Note: || short-circuits, so *i++ is only evaluated when n <= 3.
    if (n > 3 || ROOT_NAME != sstring(*i++)) {
        throw std::invalid_argument(sprint("%s is not a valid data resource name", s));
    }
    if (n == 1) {
        return data_resource();
    }
    auto ks = *i++;
    if (n == 2) {
        return data_resource(ks.str());
    }
    auto cf = *i++;
    return data_resource(ks.str(), cf.str());
}
// Renders the resource as "data", "data/<ks>" or "data/<ks>/<cf>".
sstring auth::data_resource::name() const {
    auto l = get_level();
    if (l == level::ROOT) {
        return ROOT_NAME;
    }
    if (l == level::KEYSPACE) {
        return sprint("%s/%s", ROOT_NAME, _ks);
    }
    return sprint("%s/%s/%s", ROOT_NAME, _ks, _cf);
}
// Parent of a column family is its keyspace; parent of a keyspace is the
// root. The root itself has no parent and asking for one throws.
auth::data_resource auth::data_resource::get_parent() const {
    auto l = get_level();
    if (l == level::COLUMN_FAMILY) {
        return data_resource(_ks);
    }
    if (l == level::KEYSPACE) {
        return data_resource();
    }
    throw std::invalid_argument("Root-level resource can't have a parent");
}
// Returns the keyspace component; only valid for KEYSPACE and
// COLUMN_FAMILY level resources, throws for the root.
const sstring& auth::data_resource::keyspace() const
                throw (std::invalid_argument) {
    if (is_root_level()) {
        throw std::invalid_argument("ROOT data resource has no keyspace");
    }
    return _ks;
}
// Returns the column family component; only valid for COLUMN_FAMILY level
// resources, throws otherwise.
const sstring& auth::data_resource::column_family() const
                throw (std::invalid_argument) {
    if (!is_column_family_level()) {
        throw std::invalid_argument(sprint("%s data resource has no column family", name()));
    }
    return _cf;
}
// Every resource except the root has a parent.
bool auth::data_resource::has_parent() const {
    return !is_root_level();
}
// Checks whether the resource refers to something present in the local
// schema: the root always exists; keyspace/table levels are checked against
// the local database replica.
bool auth::data_resource::exists() const {
    switch (get_level()) {
    case level::ROOT:
        return true;
    case level::KEYSPACE:
        return service::get_local_storage_proxy().get_db().local().has_keyspace(_ks);
    case level::COLUMN_FAMILY:
    default:
        return service::get_local_storage_proxy().get_db().local().has_schema(_ks, _cf);
    }
}
// Same representation as name().
sstring auth::data_resource::to_string() const {
    return name();
}
// Two resources are equal iff both name components match.
bool auth::data_resource::operator==(const data_resource& other) const {
    return _cf == other._cf && _ks == other._ks;
}
// Lexicographic ordering: keyspace first, then column family.
bool auth::data_resource::operator<(const data_resource& other) const {
    if (_ks < other._ks) {
        return true;
    }
    if (other._ks < _ks) {
        return false;
    }
    return _cf < other._cf;
}
// Streams the resource's name() representation.
std::ostream& auth::operator<<(std::ostream& os, const data_resource& r) {
    os << r.name();
    return os;
}

View File

@@ -1,146 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <iosfwd>
#include <seastar/core/sstring.hh>
namespace auth {
/**
 * A hierarchical "data" access-control resource: the root, a keyspace, or a
 * column family within a keyspace. Mirrors origin's DataResource.
 */
class data_resource {
private:
    // Depth of this resource in the hierarchy. There is no level member
    // below, so get_level() derives it from which of _ks/_cf are set.
    enum class level {
        ROOT, KEYSPACE, COLUMN_FAMILY
    };

    // Name of the root-level resource (presumably "data" - see
    // from_name()/name() in the .cc for how it is used).
    static const sstring ROOT_NAME;

    sstring _ks; // keyspace name; empty for the root-level resource
    sstring _cf; // column family name; empty unless cf-level

    data_resource(level, const sstring& ks = {}, const sstring& cf = {});

    // Derives the level from the stored name components.
    level get_level() const;
public:
    /**
     * Creates a DataResource representing the root-level resource.
     * @return the root-level resource.
     */
    data_resource();

    /**
     * Creates a DataResource representing a keyspace.
     *
     * @param keyspace Name of the keyspace.
     */
    data_resource(const sstring& ks);

    /**
     * Creates a DataResource instance representing a column family.
     *
     * @param keyspace Name of the keyspace.
     * @param columnFamily Name of the column family.
     */
    data_resource(const sstring& ks, const sstring& cf);

    /**
     * Parses a data resource name into a DataResource instance.
     *
     * @param name Name of the data resource.
     * @return DataResource instance matching the name.
     * @throws std::invalid_argument if the name is not a valid data
     *         resource name.
     */
    static data_resource from_name(const sstring&);

    /**
     * @return Printable name of the resource.
     */
    sstring name() const;

    /**
     * @return Parent of the resource, if any. Throws std::invalid_argument
     *         if it's the root-level resource.
     */
    data_resource get_parent() const;

    // Level predicates; exactly one of these is true for any resource.
    bool is_root_level() const {
        return get_level() == level::ROOT;
    }
    bool is_keyspace_level() const {
        return get_level() == level::KEYSPACE;
    }
    bool is_column_family_level() const {
        return get_level() == level::COLUMN_FAMILY;
    }

    /**
     * @return keyspace of the resource.
     * @throws std::invalid_argument if it's the root-level resource.
     */
    const sstring& keyspace() const throw(std::invalid_argument);

    /**
     * @return column family of the resource.
     * @throws std::invalid_argument if it's not a cf-level resource.
     */
    const sstring& column_family() const throw(std::invalid_argument);

    /**
     * @return Whether or not the resource has a parent in the hierarchy.
     */
    bool has_parent() const;

    /**
     * @return Whether or not the resource exists in scylla.
     */
    bool exists() const;

    sstring to_string() const;

    // Comparisons order/compare by (_ks, _cf); the level is implied.
    bool operator==(const data_resource&) const;
    bool operator<(const data_resource&) const;
};
std::ostream& operator<<(std::ostream&, const data_resource&);
}

View File

@@ -1,357 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include <unistd.h>
#include <crypt.h>
#include <random>
#include <chrono>
#include <seastar/core/reactor.hh>
#include "auth.hh"
#include "password_authenticator.hh"
#include "authenticated_user.hh"
#include "cql3/query_processor.hh"
#include "log.hh"
// Cassandra-compatible fully qualified class name of this authenticator.
const sstring auth::password_authenticator::PASSWORD_AUTHENTICATOR_NAME("org.apache.cassandra.auth.PasswordAuthenticator");

// name of the hash column.
static const sstring SALTED_HASH = "salted_hash";
// name of the user-name (partition key) column.
static const sstring USER_NAME = "username";
// The default superuser's name and initial password. Note both are set from
// DEFAULT_SUPERUSER_NAME, i.e. the default password equals the username.
static const sstring DEFAULT_USER_NAME = auth::auth::DEFAULT_SUPERUSER_NAME;
static const sstring DEFAULT_USER_PASSWORD = auth::auth::DEFAULT_SUPERUSER_NAME;
// Name of the credentials table in the auth keyspace.
static const sstring CREDENTIALS_CF = "credentials";

static logging::logger logger("password_authenticator");
// Both special members are trivial; defaulted out-of-line.
auth::password_authenticator::~password_authenticator() = default;

auth::password_authenticator::password_authenticator() = default;
// TODO: blowfish
// Origin uses Java bcrypt library, i.e. blowfish salt
// generation and hashing, which is arguably a "better"
// password hash than sha/md5 versions usually available in
// crypt_r. Otoh, glibc 2.7+ uses a modified sha512 algo
// which should be the same order of safe, so the only
// real issue should be salted hash compatibility with
// origin if importing system tables from there.
//
// Since bcrypt/blowfish is _not_ (afaict) not available
// as a dev package/lib on most linux distros, we'd have to
// copy and compile for example OWL crypto
// (http://cvsweb.openwall.com/cgi/cvsweb.cgi/Owl/packages/glibc/crypt_blowfish/)
// to be fully bit-compatible.
//
// Until we decide this is needed, let's just use crypt_r,
// and some old-fashioned random salt generation.
static constexpr size_t rand_bytes = 16;
// Hashes 'pass' with the given crypt(3)-style salt (which may itself be a
// full salted hash, per crypt_r semantics). Throws std::system_error on
// failure, carrying errno.
static sstring hashpw(const sstring& pass, const sstring& salt) {
    // crypt_data is huge. should this be a thread_local static?
    auto state = std::make_unique<crypt_data>();
    state->initialized = 0;
    const char* hashed = crypt_r(pass.c_str(), salt.c_str(), state.get());
    if (hashed == nullptr) {
        throw std::system_error(errno, std::system_category());
    }
    return sstring(hashed);
}
// Verifies a candidate password by re-hashing it with the stored salted
// hash as salt; crypt produces the same string iff the password matches.
static bool checkpw(const sstring& pass, const sstring& salted_hash) {
    return hashpw(pass, salted_hash) == salted_hash;
}
// Generates a crypt(3)-style salt: an algorithm prefix (chosen once, on the
// first call, by probing what this libc's crypt_r supports) followed by
// rand_bytes random characters from the crypt salt alphabet.
// Throws std::runtime_error if no supported hashing algorithm is found.
static sstring gensalt() {
    static sstring prefix; // cached algorithm prefix; empty until first success

    std::random_device rd;
    std::default_random_engine e1(rd());
    sstring valid_salt = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./";
    // Bug fixes vs. the original:
    //  - uniform_int_distribution<char> is undefined behavior (char is not a
    //    permitted IntType); use a size_t distribution bounded to the
    //    alphabet, which also removes the modulo bias of "dist(e1) % size".
    std::uniform_int_distribution<size_t> dist(0, valid_salt.size() - 1);
    sstring input(rand_bytes, 0);
    for (char& c : input) {
        c = valid_salt[dist(e1)];
    }

    if (!prefix.empty()) {
        //  - previously this returned "prefix + salt" with 'salt' still
        //    empty, so every call after the first produced a prefix-only,
        //    random-less salt. Return the fresh random input instead.
        return prefix + input;
    }

    auto tmp = std::make_unique<crypt_data>();
    tmp->initialized = 0;

    // Try in order:
    // blowfish 2011 fix, blowfish, sha512, sha256, md5
    for (sstring pfx : { "$2y$", "$2a$", "$6$", "$5$", "$1$" }) {
        sstring salt = pfx + input;
        if (crypt_r("fisk", salt.c_str(), tmp.get())) {
            prefix = pfx;
            return salt;
        }
    }
    throw std::runtime_error("Could not initialize hashing algorithm");
}
// Convenience overload: hash with a freshly generated salt.
static sstring hashpw(const sstring& pass) {
    auto salt = gensalt();
    return hashpw(pass, salt);
}
// Sets up the authenticator: creates the credentials table, then (after a
// delay, off the returned future) seeds the default superuser if no users
// exist yet.
future<> auth::password_authenticator::init() {
    gensalt(); // do this once to determine usable hashing
    sstring create_table = sprint(
        "CREATE TABLE %s.%s ("
        "%s text,"
        "%s text," // salt + hash + number of rounds
        "options map<text,text>,"// for future extensions
        "PRIMARY KEY(%s)"
        ") WITH gc_grace_seconds=%d",
        auth::auth::AUTH_KS,
        CREDENTIALS_CF, USER_NAME, SALTED_HASH, USER_NAME,
        90 * 24 * 60 * 60); // 3 months.
    return auth::setup_table(CREDENTIALS_CF, create_table).then([this] {
        // instead of once-timer, just schedule this later
        // NOTE(review): the future of this sleep() chain is deliberately
        // discarded (fire-and-forget), so a failure to create the default
        // user is silently dropped - confirm that is acceptable.
        sleep(auth::SUPERUSER_SETUP_DELAY).then([] {
            auth::has_existing_users(CREDENTIALS_CF, DEFAULT_USER_NAME, USER_NAME).then([](bool exists) {
                if (!exists) {
                    // Seed row is written USING TIMESTAMP 0 so that any
                    // normally-timestamped password change wins over it.
                    cql3::get_local_query_processor().process(sprint("INSERT INTO %s.%s (%s, %s) VALUES (?, ?) USING TIMESTAMP 0",
                        auth::AUTH_KS,
                        CREDENTIALS_CF,
                        USER_NAME, SALTED_HASH
                    ),
                    db::consistency_level::ONE, {DEFAULT_USER_NAME, hashpw(DEFAULT_USER_PASSWORD)}).then([](auto) {
                        logger.info("Created default user '{}'", DEFAULT_USER_NAME);
                    });
                }
            });
        });
    });
}
// Consistency level for credential reads/writes: QUORUM for the default
// superuser, LOCAL_ONE for everyone else.
db::consistency_level auth::password_authenticator::consistency_for_user(const sstring& username) {
    return username == DEFAULT_USER_NAME
            ? db::consistency_level::QUORUM
            : db::consistency_level::LOCAL_ONE;
}
// Cassandra-compatible fully qualified class name of this authenticator.
const sstring& auth::password_authenticator::class_name() const {
    return PASSWORD_AUTHENTICATOR_NAME;
}

// Password authentication always requires clients to supply credentials.
bool auth::password_authenticator::require_authentication() const {
    return true;
}

// The only option understood when creating a user is PASSWORD.
auth::authenticator::option_set auth::password_authenticator::supported_options() const {
    return option_set::of<option::PASSWORD>();
}

// The PASSWORD option may also be changed after creation (ALTER USER).
auth::authenticator::option_set auth::password_authenticator::alterable_options() const {
    return option_set::of<option::PASSWORD>();
}
// Authenticates a username/password pair against the credentials table.
// Throws authentication_exception synchronously if either key is missing;
// otherwise resolves to the authenticated user, or fails with (a nested)
// authentication_exception on wrong credentials or lookup errors.
future<::shared_ptr<auth::authenticated_user> > auth::password_authenticator::authenticate(
                const credentials_map& credentials) const
                throw (exceptions::authentication_exception) {
    if (!credentials.count(USERNAME_KEY)) {
        throw exceptions::authentication_exception(sprint("Required key '%s' is missing", USERNAME_KEY));
    }
    if (!credentials.count(PASSWORD_KEY)) {
        throw exceptions::authentication_exception(sprint("Required key '%s' is missing", PASSWORD_KEY));
    }

    auto& username = credentials.at(USERNAME_KEY);
    auto& password = credentials.at(PASSWORD_KEY);

    // Here was a thread local, explicit cache of prepared statement. In normal execution this is
    // fine, but since we in testing set up and tear down system over and over, we'd start using
    // obsolete prepared statements pretty quickly.
    // Rely on query processing caching statements instead, and lets assume
    // that a map lookup string->statement is not gonna kill us much.
    auto& qp = cql3::get_local_query_processor();
    // [=] copies username/password by value into the continuation, so the
    // strings stay alive after this function returns.
    return qp.process(
            sprint("SELECT %s FROM %s.%s WHERE %s = ?", SALTED_HASH,
                    auth::AUTH_KS, CREDENTIALS_CF, USER_NAME),
            consistency_for_user(username), { username }, true).then_wrapped(
            [=](future<::shared_ptr<cql3::untyped_result_set>> f) {
        try {
            auto res = f.get0();
            // Missing row (unknown user) and wrong password produce the
            // same message, so callers can't probe for valid usernames.
            if (res->empty() || !checkpw(password, res->one().get_as<sstring>(SALTED_HASH))) {
                throw exceptions::authentication_exception("Username and/or password are incorrect");
            }
            return make_ready_future<::shared_ptr<authenticated_user>>(::make_shared<authenticated_user>(username));
        } catch (std::system_error &) {
            // Thrown by hashpw() when crypt_r fails.
            std::throw_with_nested(exceptions::authentication_exception("Could not verify password"));
        } catch (exceptions::request_execution_exception& e) {
            std::throw_with_nested(exceptions::authentication_exception(e.what()));
        }
    });
}
// Creates a credentials row for a new user. The PASSWORD option is
// mandatory; its absence surfaces as invalid_request_exception.
future<> auth::password_authenticator::create(sstring username,
                const option_map& options)
                throw (exceptions::request_validation_exception,
                exceptions::request_execution_exception) {
    try {
        // options.at() throws std::out_of_range when PASSWORD is absent.
        auto password = boost::any_cast<sstring>(options.at(option::PASSWORD));
        const auto query = sprint("INSERT INTO %s.%s (%s, %s) VALUES (?, ?)",
                        auth::AUTH_KS, CREDENTIALS_CF, USER_NAME, SALTED_HASH);
        return cql3::get_local_query_processor()
                .process(query, consistency_for_user(username), { username, hashpw(password) })
                .discard_result();
    } catch (std::out_of_range&) {
        throw exceptions::invalid_request_exception("PasswordAuthenticator requires PASSWORD option");
    }
}
// Replaces an existing user's salted hash. The PASSWORD option is
// mandatory; its absence surfaces as invalid_request_exception.
future<> auth::password_authenticator::alter(sstring username,
                const option_map& options)
                throw (exceptions::request_validation_exception,
                exceptions::request_execution_exception) {
    try {
        // options.at() throws std::out_of_range when PASSWORD is absent.
        auto password = boost::any_cast<sstring>(options.at(option::PASSWORD));
        const auto query = sprint("UPDATE %s.%s SET %s = ? WHERE %s = ?",
                        auth::AUTH_KS, CREDENTIALS_CF, SALTED_HASH, USER_NAME);
        return cql3::get_local_query_processor()
                .process(query, consistency_for_user(username), { hashpw(password), username })
                .discard_result();
    } catch (std::out_of_range&) {
        throw exceptions::invalid_request_exception("PasswordAuthenticator requires PASSWORD option");
    }
}
// Deletes the user's credentials row.
//
// Unlike create()/alter(), drop() takes no options, so nothing here can
// throw std::out_of_range; the copy-pasted try/catch translating it into
// "requires PASSWORD option" was dead (and misleading) code and is removed.
future<> auth::password_authenticator::drop(sstring username)
                throw (exceptions::request_validation_exception,
                exceptions::request_execution_exception) {
    auto query = sprint("DELETE FROM %s.%s WHERE %s = ?",
                    auth::AUTH_KS, CREDENTIALS_CF, USER_NAME);
    auto& qp = cql3::get_local_query_processor();
    return qp.process(query, consistency_for_user(username), { username }).discard_result();
}
// The credentials table itself must be protected from ordinary access.
auth::authenticator::resource_ids auth::password_authenticator::protected_resources() const {
    data_resource credentials_table(auth::AUTH_KS, CREDENTIALS_CF);
    return { credentials_table };
}
// Returns a SASL challenge implementing the PLAIN mechanism on top of this
// authenticator.
::shared_ptr<auth::authenticator::sasl_challenge> auth::password_authenticator::new_sasl_challenge() const {
    class plain_text_password_challenge: public sasl_challenge {
    public:
        // NOTE(review): the authenticator is held by reference - assumes it
        // outlives the challenge object. Confirm with callers.
        plain_text_password_challenge(const password_authenticator& a)
                        : _authenticator(a)
        {}

        /**
         * SASL PLAIN mechanism specifies that credentials are encoded in a
         * sequence of UTF-8 bytes, delimited by 0 (US-ASCII NUL).
         * The form is : {code}authzId<NUL>authnId<NUL>password<NUL>{code}
         * authzId is optional, and in fact we don't care about it here as we'll
         * set the authzId to match the authnId (that is, there is no concept of
         * a user being authorized to act on behalf of another).
         *
         * @param bytes encoded credentials string sent by the client
         * @return map containing the username/password pairs in the form an IAuthenticator
         * would expect
         * @throws javax.security.sasl.SaslException
         */
        bytes evaluate_response(bytes_view client_response)
                        throw (exceptions::authentication_exception) override {
            logger.debug("Decoding credentials from client token");

            sstring username, password;
            // Scan the token backwards with reverse iterators: each NUL
            // terminates the segment between it and the previous boundary,
            // so the password (last segment) is extracted first, then the
            // username; a leading authzId segment is never assigned.
            auto b = client_response.crbegin();
            auto e = client_response.crend();
            auto i = b;

            while (i != e) {
                if (*i == 0) {
                    // i.base() is the forward iterator just past *i, i.e.
                    // the first character after this NUL; b.base() is the
                    // end of the current segment.
                    sstring tmp(i.base(), b.base());
                    // NOTE(review): an empty segment keeps password/username
                    // empty and shifts the next token into that slot; the
                    // emptiness checks below reject the fully-empty cases,
                    // but confirm malformed tokens can't mis-assign fields.
                    if (password.empty()) {
                        password = std::move(tmp);
                    } else if (username.empty()) {
                        username = std::move(tmp);
                    }
                    b = ++i;
                    continue;
                }
                ++i;
            }

            if (username.empty()) {
                throw exceptions::authentication_exception("Authentication ID must not be null");
            }
            if (password.empty()) {
                throw exceptions::authentication_exception("Password must not be null");
            }

            _credentials[USERNAME_KEY] = std::move(username);
            _credentials[PASSWORD_KEY] = std::move(password);
            _complete = true;
            // PLAIN has no server challenge to send back.
            return {};
        }

        bool is_complete() const override {
            return _complete;
        }

        future<::shared_ptr<authenticated_user>> get_authenticated_user() const
                        throw (exceptions::authentication_exception) override {
            return _authenticator.authenticate(_credentials);
        }
    private:
        const password_authenticator& _authenticator;
        credentials_map _credentials; // filled in by evaluate_response()
        bool _complete = false;       // set once a full token has been parsed
    };
    return ::make_shared<plain_text_password_challenge>(*this);
}

View File

@@ -1,73 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "authenticator.hh"
namespace auth {
/**
 * Authenticator backed by a credentials table holding a salted password
 * hash per user. Mirrors origin's PasswordAuthenticator.
 */
class password_authenticator : public authenticator {
public:
    // Cassandra-compatible fully qualified class name.
    static const sstring PASSWORD_AUTHENTICATOR_NAME;

    password_authenticator();
    ~password_authenticator();

    // Creates the credentials table and, after a delay, seeds the default
    // superuser if no users exist yet.
    future<> init();

    const sstring& class_name() const override;
    bool require_authentication() const override;
    option_set supported_options() const override;
    option_set alterable_options() const override;
    future<::shared_ptr<authenticated_user>> authenticate(const credentials_map& credentials) const throw(exceptions::authentication_exception) override;
    future<> create(sstring username, const option_map& options) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) override;
    future<> alter(sstring username, const option_map& options) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) override;
    future<> drop(sstring username) throw(exceptions::request_validation_exception, exceptions::request_execution_exception) override;
    resource_ids protected_resources() const override;
    ::shared_ptr<sasl_challenge> new_sasl_challenge() const override;

    // Consistency for credential reads/writes: QUORUM for the default
    // superuser, LOCAL_ONE otherwise.
    static db::consistency_level consistency_for_user(const sstring& username);
};
}

View File

@@ -1,49 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "permission.hh"
// The full set of permissions applicable to data resources (excludes the
// deprecated READ/WRITE members).
const auth::permission_set auth::ALL_DATA = auth::permission_set::of
        < auth::permission::CREATE, auth::permission::ALTER,
        auth::permission::DROP, auth::permission::SELECT,
        auth::permission::MODIFY, auth::permission::AUTHORIZE>();

// ALL currently coincides with ALL_DATA.
const auth::permission_set auth::ALL = auth::ALL_DATA;

// The empty (default-constructed) permission set.
const auth::permission_set auth::NONE;

View File

@@ -1,81 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2016 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "enum_set.hh"
namespace auth {

/**
 * Permissions grantable on a resource. Mirrors origin's Permission enum,
 * including two deprecated members kept for compatibility.
 */
enum class permission {
    //Deprecated
    READ,
    //Deprecated
    WRITE,

    // schema management
    CREATE, // required for CREATE KEYSPACE and CREATE TABLE.
    ALTER,  // required for ALTER KEYSPACE, ALTER TABLE, CREATE INDEX, DROP INDEX.
    DROP,   // required for DROP KEYSPACE and DROP TABLE.

    // data access
    SELECT, // required for SELECT.
    MODIFY, // required for INSERT, UPDATE, DELETE, TRUNCATE.

    // permission management
    AUTHORIZE, // required for GRANT and REVOKE.
};

// Set of permissions. Alias declaration ("using") instead of typedef, for
// consistency with the C++11 style used elsewhere in this header.
using permission_set = enum_set<super_enum<permission,
        permission::READ,
        permission::WRITE,
        permission::CREATE,
        permission::ALTER,
        permission::DROP,
        permission::SELECT,
        permission::MODIFY,
        permission::AUTHORIZE>>;

// Predefined sets, defined in permission.cc.
extern const permission_set ALL_DATA;
extern const permission_set ALL;
extern const permission_set NONE;

}

View File

@@ -22,7 +22,6 @@
#pragma once
#include "core/sstring.hh"
#include "hashing.hh"
#include <experimental/optional>
#include <iosfwd>
#include <functional>
@@ -58,20 +57,3 @@ std::ostream& operator<<(std::ostream& os, const bytes_view& b);
}
template<>
struct appending_hash<bytes> {
template<typename Hasher>
void operator()(Hasher& h, const bytes& v) const {
feed_hash(h, v.size());
h.update(reinterpret_cast<const char*>(v.cbegin()), v.size() * sizeof(bytes::value_type));
}
};
template<>
struct appending_hash<bytes_view> {
template<typename Hasher>
void operator()(Hasher& h, bytes_view v) const {
feed_hash(h, v.size());
h.update(reinterpret_cast<const char*>(v.begin()), v.size() * sizeof(bytes_view::value_type));
}
};

View File

@@ -24,7 +24,6 @@
#include "types.hh"
#include "net/byteorder.hh"
#include "core/unaligned.hh"
#include "hashing.hh"
/**
* Utility for writing data into a buffer when its final size is not known up front.
@@ -34,10 +33,8 @@
*
*/
class bytes_ostream {
public:
using size_type = bytes::size_type;
using value_type = bytes::value_type;
private:
static_assert(sizeof(value_type) == 1, "value_type is assumed to be one byte long");
struct chunk {
// FIXME: group fragment pointers to reduce pointer chasing when packetizing
@@ -120,13 +117,13 @@ private:
};
}
public:
bytes_ostream() noexcept
bytes_ostream()
: _begin()
, _current(nullptr)
, _size(0)
{ }
bytes_ostream(bytes_ostream&& o) noexcept
bytes_ostream(bytes_ostream&& o)
: _begin(std::move(o._begin))
, _current(o._current)
, _size(o._size)
@@ -151,7 +148,7 @@ public:
return *this;
}
bytes_ostream& operator=(bytes_ostream&& o) noexcept {
bytes_ostream& operator=(bytes_ostream&& o) {
_size = o._size;
_begin = std::move(o._begin);
_current = o._current;
@@ -333,13 +330,3 @@ public:
_current->offset = pos._offset;
}
};
template<>
struct appending_hash<bytes_ostream> {
template<typename Hasher>
void operator()(Hasher& h, const bytes_ostream& b) const {
for (auto&& frag : b.fragments()) {
feed_hash(h, frag);
}
}
};

View File

@@ -43,13 +43,11 @@ class caching_options {
throw exceptions::configuration_exception("Invalid key value: " + k);
}
if ((r == "ALL") || (r == "NONE")) {
return;
} else {
try {
boost::lexical_cast<unsigned long>(r);
} catch (boost::bad_lexical_cast& e) {
throw exceptions::configuration_exception("Invalid key value: " + r);
try {
boost::lexical_cast<unsigned long>(r);
} catch (boost::bad_lexical_cast& e) {
if ((r != "ALL") && (r != "NONE")) {
throw exceptions::configuration_exception("Invalid key value: " + k);
}
}
}
@@ -82,12 +80,6 @@ public:
}
return caching_options(k, r);
}
bool operator==(const caching_options& other) const {
return _key_cache == other._key_cache && _row_cache == other._row_cache;
}
bool operator!=(const caching_options& other) const {
return !(*this == other);
}
};

View File

@@ -1,98 +0,0 @@
/*
* Copyright (C) 2015 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "canonical_mutation.hh"
#include "mutation.hh"
#include "mutation_partition_serializer.hh"
#include "converting_mutation_partition_applier.hh"
#include "hashing_partition_visitor.hh"
template class db::serializer<canonical_mutation>;
//
// Representation layout:
//
// <canonical_mutation> ::= <column_family_id> <table_schema_version> <partition_key> <column-mapping> <partition>
//
// For <partition> see mutation_partition_serializer.cc
// For <column-mapping> see db::serializer<column_mapping>
//
// Wraps an already-serialized canonical representation (used by the
// deserializing reader below).
canonical_mutation::canonical_mutation(bytes data)
        : _data(std::move(data))
{ }

// Serializes a mutation into the canonical, schema-version-independent
// form described in the representation-layout comment above:
// cf id, schema version, partition key, column mapping, partition.
canonical_mutation::canonical_mutation(const mutation& m)
        : _data([&m] {
            bytes_ostream out;
            db::serializer<utils::UUID>(m.column_family_id()).write(out);
            db::serializer<table_schema_version>(m.schema()->version()).write(out);
            db::serializer<partition_key_view>(m.key()).write(out);
            db::serializer<column_mapping>(m.schema()->get_column_mapping()).write(out);
            mutation_partition_serializer ser(*m.schema(), m.partition());
            ser.write(out);
            return to_bytes(out.linearize());
        }())
{ }
// Deserializes this canonical mutation against the given schema. If the
// stored schema version matches, the partition data is applied directly;
// otherwise the embedded column mapping is used to convert columns onto
// the target schema (columns not representable there are dropped).
// Throws std::runtime_error if the schema belongs to a different table.
mutation canonical_mutation::to_mutation(schema_ptr s) const {
    data_input in(_data);

    auto cf_id = db::serializer<utils::UUID>::read(in);
    if (s->id() != cf_id) {
        throw std::runtime_error(sprint("Attempted to deserialize canonical_mutation of table %s with schema of table %s (%s.%s)",
                                        cf_id, s->id(), s->ks_name(), s->cf_name()));
    }

    auto version = db::serializer<table_schema_version>::read(in);
    auto pk = partition_key(db::serializer<partition_key_view>::read(in));

    mutation m(std::move(pk), std::move(s));

    if (version == m.schema()->version()) {
        // Same schema version: the column mapping is redundant, skip it and
        // apply the partition view as-is.
        db::serializer<column_mapping>::skip(in);
        auto partition_view = mutation_partition_serializer::read_as_view(in);
        m.partition().apply(*m.schema(), partition_view, *m.schema());
    } else {
        // Different version: replay the partition through a converting
        // applier that maps serialized columns onto the target schema.
        column_mapping cm = db::serializer<column_mapping>::read(in);
        converting_mutation_partition_applier v(cm, *m.schema(), m.partition());
        auto partition_view = mutation_partition_serializer::read_as_view(in);
        partition_view.accept(cm, v);
    }
    return m;
}
// db::serializer<canonical_mutation> specializations simply delegate to the
// bytes serializer for the wrapped, already-serialized blob.
template<>
db::serializer<canonical_mutation>::serializer(const canonical_mutation& v)
        : _item(v)
        , _size(db::serializer<bytes>(v._data).size())
{ }

template<>
void
db::serializer<canonical_mutation>::write(output& out, const canonical_mutation& v) {
    db::serializer<bytes>(v._data).write(out);
}

template<>
canonical_mutation db::serializer<canonical_mutation>::read(input& in) {
    return canonical_mutation(db::serializer<bytes>::read(in));
}

View File

@@ -1,71 +0,0 @@
/*
* Copyright (C) 2015 ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "bytes.hh"
#include "schema.hh"
#include "database_fwd.hh"
#include "db/serializer.hh"
#include "mutation_partition_visitor.hh"
#include "mutation_partition_serializer.hh"
// Immutable mutation form which can be read using any schema version of the same table.
// Safe to access from other shards via const&.
// Safe to pass serialized across nodes.
class canonical_mutation {
    bytes _data; // serialized form; layout documented in canonical_mutation.cc

    // Adopts an already-serialized blob; used by the serializer's read().
    canonical_mutation(bytes);
public:
    explicit canonical_mutation(const mutation&);

    // Freely copyable and movable: the only state is the byte blob.
    canonical_mutation(canonical_mutation&&) = default;
    canonical_mutation(const canonical_mutation&) = default;
    canonical_mutation& operator=(const canonical_mutation&) = default;
    canonical_mutation& operator=(canonical_mutation&&) = default;

    // Create a mutation object interpreting this canonical mutation using
    // given schema.
    //
    // Data which is not representable in the target schema is dropped. If this
    // is not intended, user should sync the schema first.
    mutation to_mutation(schema_ptr) const;

    friend class db::serializer<canonical_mutation>;
};
//
//template<>
//struct hash<canonical_mutation> {
// template<typename Hasher>
// void operator()(Hasher& h, const canonical_mutation& m) const {
// m.feed_hash(h);
// }
//};
namespace db {
template<> serializer<canonical_mutation>::serializer(const canonical_mutation&);
template<> void serializer<canonical_mutation>::write(output&, const canonical_mutation&);
template<> canonical_mutation serializer<canonical_mutation>::read(input&);
extern template class serializer<canonical_mutation>;
}

View File

@@ -29,8 +29,7 @@ enum class compaction_strategy_type {
null,
major,
size_tiered,
leveled,
// FIXME: Add support to DateTiered.
// FIXME: Add support to LevelTiered, and DateTiered.
};
class compaction_strategy_impl;
@@ -55,26 +54,20 @@ public:
return "MajorCompactionStrategy";
case compaction_strategy_type::size_tiered:
return "SizeTieredCompactionStrategy";
case compaction_strategy_type::leveled:
return "LeveledCompactionStrategy";
default:
throw std::runtime_error("Invalid Compaction Strategy");
}
}
static compaction_strategy_type type(const sstring& name) {
auto pos = name.find("org.apache.cassandra.db.compaction.");
sstring short_name = (pos == sstring::npos) ? name : name.substr(pos + 35);
if (short_name == "NullCompactionStrategy") {
if (name == "NullCompactionStrategy") {
return compaction_strategy_type::null;
} else if (short_name == "MajorCompactionStrategy") {
} else if (name == "MajorCompactionStrategy") {
return compaction_strategy_type::major;
} else if (short_name == "SizeTieredCompactionStrategy") {
} else if (name == "SizeTieredCompactionStrategy") {
return compaction_strategy_type::size_tiered;
} else if (short_name == "LeveledCompactionStrategy") {
return compaction_strategy_type::leveled;
} else {
throw exceptions::configuration_exception(sprint("Unable to find compaction strategy class '%s'", name));
throw exceptions::configuration_exception(sprint("Unable to find compaction strategy class 'org.apache.cassandra.db.compaction.%s", name));
}
}

View File

@@ -68,7 +68,7 @@ public:
, _byte_order_equal(std::all_of(_types.begin(), _types.end(), [] (auto t) {
return t->is_byte_order_equal();
}))
, _byte_order_comparable(!is_prefixable && _types.size() == 1 && _types[0]->is_byte_order_comparable())
, _byte_order_comparable(_types.size() == 1 && _types[0]->is_byte_order_comparable())
, _is_reversed(_types.size() == 1 && _types[0]->is_reversed())
{ }
@@ -159,7 +159,7 @@ public:
}
return ::serialize_value(*this, values);
}
bytes serialize_value_deep(const std::vector<data_value>& values) {
bytes serialize_value_deep(const std::vector<boost::any>& values) {
// TODO: Optimize
std::vector<bytes> partial;
partial.reserve(values.size());
@@ -278,10 +278,10 @@ public:
});
}
bytes from_string(sstring_view s) {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
}
sstring to_string(const bytes& b) {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
}
// Returns true iff given prefix has no missing components
bool is_full(bytes_view v) const {

View File

@@ -114,14 +114,6 @@ public:
}
return opts;
}
bool operator==(const compression_parameters& other) const {
return _compressor == other._compressor
&& _chunk_length == other._chunk_length
&& _crc_check_chance == other._crc_check_chance;
}
bool operator!=(const compression_parameters& other) const {
return !(*this == other);
}
private:
void validate_options(const std::map<sstring, sstring>& options) {
// currently, there are no options specific to a particular compressor

View File

@@ -1,15 +0,0 @@
#
# cassandra-rackdc.properties
#
# The lines may include white spaces at the beginning and the end.
# The rack and data center names may also include white spaces.
# All trailing and leading white spaces will be trimmed.
#
# dc=my_data_center
# rack=my_rack
# prefer_local=<false | true>
# dc_suffix=<Data Center name suffix, used by EC2SnitchXXX snitches>
#

View File

@@ -169,17 +169,6 @@ rpc_address: localhost
# port for Thrift to listen for clients on
rpc_port: 9160
# port for REST API server
api_port: 10000
# IP for the REST API server
api_address: 127.0.0.1
# Log WARN on any batch size exceeding this value. 5kb per batch by default.
# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
batch_size_warn_threshold_in_kb: 5
###################################################
## Not currently supported, reserved for future use
###################################################
@@ -216,7 +205,7 @@ batch_size_warn_threshold_in_kb: 5
# reduced proportionally to the number of nodes in the cluster.
# batchlog_replay_throttle_in_kb: 1024
# Authentication backend, identifying users
# Authentication backend, implementing IAuthenticator; used to identify users
# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator,
# PasswordAuthenticator}.
#
@@ -420,16 +409,15 @@ partitioner: org.apache.cassandra.dht.Murmur3Partitioner
# offheap_objects: native memory, eliminating nio buffer heap overhead
# memtable_allocation_type: heap_buffers
# Total space to use for commitlogs.
# Total space to use for commitlogs. Since commitlog segments are
# mmapped, and hence use up address space, the default size is 32
# on 32-bit JVMs, and 8192 on 64-bit JVMs.
#
# If space gets above this value (it will round up to the next nearest
# segment multiple), Scylla will flush every dirty CF in the oldest
# segment and remove it. So a small total commitlog space will tend
# to cause more flush activity on less-active columnfamilies.
#
# A value of -1 (default) will automatically equate it to the total amount of memory
# available for Scylla.
commitlog_total_space_in_mb: -1
commitlog_total_space_in_mb: 8192
# This sets the amount of memtable flush writer threads. These will
# be blocked by disk io, and each one will hold a memtable in memory
@@ -610,6 +598,10 @@ commitlog_total_space_in_mb: -1
# column_index_size_in_kb: 64
# Log WARN on any batch size exceeding this value. 5kb per batch by default.
# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
# batch_size_warn_threshold_in_kb: 5
# Number of simultaneous compactions to allow, NOT including
# validation "compactions" for anti-entropy repair. Simultaneous
# compactions can help preserve read performance in a mixed read/write
@@ -789,25 +781,40 @@ commitlog_total_space_in_mb: -1
# the request scheduling. Currently the only valid option is keyspace.
# request_scheduler_id: keyspace
# Enable or disable inter-node encryption.
# You must also generate keys and provide the appropriate key and trust store locations and passwords.
# No custom encryption options are currently enabled. The available options are:
#
# Enable or disable inter-node encryption
# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
# suite for authentication, key exchange and encryption of the actual data transfers.
# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
# NOTE: No custom encryption options are enabled at the moment
# The available internode options are : all, none, dc, rack
# If set to dc scylla will encrypt the traffic between the DCs
# If set to rack scylla will encrypt the traffic between the racks
#
# If set to dc cassandra will encrypt the traffic between the DCs
# If set to rack cassandra will encrypt the traffic between the racks
#
# The passwords used in these options must match the passwords used when generating
# the keystore and truststore. For instructions on generating these files, see:
# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
#
# server_encryption_options:
# internode_encryption: none
# certificate: conf/scylla.crt
# keyfile: conf/scylla.key
# truststore: <none, use system trust>
# keystore: conf/.keystore
# keystore_password: cassandra
# truststore: conf/.truststore
# truststore_password: cassandra
# More advanced defaults below:
# protocol: TLS
# algorithm: SunX509
# store_type: JKS
# cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
# require_client_auth: false
# enable or disable client/server encryption.
# client_encryption_options:
# enabled: false
# certificate: conf/scylla.crt
# keyfile: conf/scylla.key
# keystore: conf/.keystore
# keystore_password: cassandra
# require_client_auth: false
# Set trustore and truststore_password if require_client_auth is true
@@ -831,17 +838,3 @@ commitlog_total_space_in_mb: -1
# reducing overhead from the TCP protocol itself, at the cost of increasing
# latency if you block for cross-datacenter responses.
# inter_dc_tcp_nodelay: false
# Relaxation of environment checks.
#
# Scylla places certain requirements on its environment. If these requirements are
# not met, performance and reliability can be degraded.
#
# These requirements include:
# - A filesystem with good support for asynchronous I/O (AIO). Currently,
# this means XFS.
#
# false: strict environment checks are in place; do not start if they are not met.
# true: relaxed environment checks; performance and reliability may degrade.
#
# developer_mode: false

View File

@@ -50,9 +50,6 @@ def apply_tristate(var, test, note, missing):
return False
return False
def have_pkg(package):
return subprocess.call(['pkg-config', package]) == 0
def pkg_config(option, package):
output = subprocess.check_output(['pkg-config', option, package])
return output.decode('utf-8').strip()
@@ -135,9 +132,8 @@ modes = {
},
}
scylla_tests = [
urchin_tests = [
'tests/mutation_test',
'tests/canonical_mutation_test',
'tests/range_test',
'tests/types_test',
'tests/keys_test',
@@ -155,9 +151,7 @@ scylla_tests = [
'tests/perf/perf_sstable',
'tests/cql_query_test',
'tests/storage_proxy_test',
'tests/schema_change_test',
'tests/mutation_reader_test',
'tests/key_reader_test',
'tests/mutation_query_test',
'tests/row_cache_test',
'tests/test-serialization',
@@ -175,7 +169,6 @@ scylla_tests = [
'tests/compound_test',
'tests/config_test',
'tests/gossiping_property_file_snitch_test',
'tests/ec2_snitch_test',
'tests/snitch_reset_test',
'tests/network_topology_strategy_test',
'tests/query_processor_test',
@@ -187,16 +180,13 @@ scylla_tests = [
'tests/logalloc_test',
'tests/managed_vector_test',
'tests/crc_test',
'tests/flush_queue_test',
'tests/dynamic_bitset_test',
'tests/auth_test',
]
apps = [
'scylla',
]
tests = scylla_tests
tests = urchin_tests
all_artifacts = apps + tests
@@ -226,32 +216,24 @@ arg_parser.add_argument('--debuginfo', action = 'store', dest = 'debuginfo', typ
help = 'Enable(1)/disable(0)compiler debug information generation')
arg_parser.add_argument('--static-stdc++', dest = 'staticcxx', action = 'store_true',
help = 'Link libgcc and libstdc++ statically')
arg_parser.add_argument('--tests-debuginfo', action = 'store', dest = 'tests_debuginfo', type = int, default = 0,
help = 'Enable(1)/disable(0)compiler debug information generation for tests')
arg_parser.add_argument('--python', action = 'store', dest = 'python', default = 'python3',
help = 'Python3 path')
add_tristate(arg_parser, name = 'hwloc', dest = 'hwloc', help = 'hwloc support')
add_tristate(arg_parser, name = 'xen', dest = 'xen', help = 'Xen support')
args = arg_parser.parse_args()
defines = []
scylla_libs = '-llz4 -lsnappy -lz -lboost_thread -lcryptopp -lrt -lyaml-cpp -lboost_date_time'
urchin_libs = '-llz4 -lsnappy -lz -lboost_thread -lcryptopp -lrt -lyaml-cpp -lboost_date_time'
extra_cxxflags = {}
cassandra_interface = Thrift(source = 'interface/cassandra.thrift', service = 'Cassandra')
scylla_core = (['database.cc',
urchin_core = (['database.cc',
'schema.cc',
'frozen_schema.cc',
'schema_registry.cc',
'bytes.cc',
'mutation.cc',
'row_cache.cc',
'canonical_mutation.cc',
'frozen_mutation.cc',
'memtable.cc',
'schema_mutations.cc',
'release.cc',
'utils/logalloc.cc',
'utils/large_bitset.cc',
@@ -260,7 +242,6 @@ scylla_core = (['database.cc',
'mutation_partition_serializer.cc',
'mutation_reader.cc',
'mutation_query.cc',
'key_reader.cc',
'keys.cc',
'sstables/sstables.cc',
'sstables/compress.cc',
@@ -285,16 +266,12 @@ scylla_core = (['database.cc',
'cql3/maps.cc',
'cql3/functions/functions.cc',
'cql3/statements/cf_prop_defs.cc',
'cql3/statements/cf_statement.cc',
'cql3/statements/create_keyspace_statement.cc',
'cql3/statements/create_table_statement.cc',
'cql3/statements/drop_keyspace_statement.cc',
'cql3/statements/drop_table_statement.cc',
'cql3/statements/schema_altering_statement.cc',
'cql3/statements/ks_prop_defs.cc',
'cql3/statements/modification_statement.cc',
'cql3/statements/parsed_statement.cc',
'cql3/statements/property_definitions.cc',
'cql3/statements/update_statement.cc',
'cql3/statements/delete_statement.cc',
'cql3/statements/batch_statement.cc',
@@ -304,7 +281,6 @@ scylla_core = (['database.cc',
'cql3/statements/index_target.cc',
'cql3/statements/create_index_statement.cc',
'cql3/statements/truncate_statement.cc',
'cql3/statements/alter_table_statement.cc',
'cql3/update_parameters.cc',
'cql3/ut_name.cc',
'thrift/handler.cc',
@@ -355,7 +331,6 @@ scylla_core = (['database.cc',
'utils/rate_limiter.cc',
'utils/compaction_manager.cc',
'utils/file_lock.cc',
'utils/dynamic_bitset.cc',
'gms/version_generator.cc',
'gms/versioned_value.cc',
'gms/gossiper.cc',
@@ -364,12 +339,10 @@ scylla_core = (['database.cc',
'gms/gossip_digest_ack.cc',
'gms/gossip_digest_ack2.cc',
'gms/endpoint_state.cc',
'gms/application_state.cc',
'dht/i_partitioner.cc',
'dht/murmur3_partitioner.cc',
'dht/byte_ordered_partitioner.cc',
'dht/boot_strapper.cc',
'dht/range_streamer.cc',
'unimplemented.cc',
'query.cc',
'query-result-set.cc',
@@ -383,17 +356,9 @@ scylla_core = (['database.cc',
'locator/simple_snitch.cc',
'locator/rack_inferring_snitch.cc',
'locator/gossiping_property_file_snitch.cc',
'locator/production_snitch_base.cc',
'locator/ec2_snitch.cc',
'locator/ec2_multi_region_snitch.cc',
'message/messaging_service.cc',
'service/client_state.cc',
'service/migration_task.cc',
'service/storage_service.cc',
'service/pending_range_calculator_service.cc',
'service/load_broadcaster.cc',
'service/pager/paging_state.cc',
'service/pager/query_pagers.cc',
'streaming/streaming.cc',
'streaming/stream_task.cc',
'streaming/stream_session.cc',
@@ -414,19 +379,11 @@ scylla_core = (['database.cc',
'streaming/messages/file_message_header.cc',
'streaming/messages/outgoing_file_message.cc',
'streaming/messages/incoming_file_message.cc',
'streaming/stream_session_state.cc',
'gc_clock.cc',
'partition_slice_builder.cc',
'init.cc',
'repair/repair.cc',
'exceptions/exceptions.cc',
'dns.cc',
'auth/auth.cc',
'auth/authenticated_user.cc',
'auth/authenticator.cc',
'auth/data_resource.cc',
'auth/password_authenticator.cc',
'auth/permission.cc',
]
+ [Antlr3Grammar('cql3/Cql.g')]
+ [Thrift('interface/cassandra.thrift', 'Cassandra')]
@@ -462,24 +419,22 @@ api = ['api/api.cc',
'api/lsa.cc',
'api/api-doc/stream_manager.json',
'api/stream_manager.cc',
'api/api-doc/system.json',
'api/system.cc'
]
scylla_tests_dependencies = scylla_core + [
urchin_tests_dependencies = urchin_core + [
'tests/cql_test_env.cc',
'tests/cql_assertions.cc',
'tests/result_set_assertions.cc',
'tests/mutation_source_test.cc',
]
scylla_tests_seastar_deps = [
urchin_tests_seastar_deps = [
'seastar/tests/test-utils.cc',
'seastar/tests/test_runner.cc',
]
deps = {
'scylla': ['main.cc'] + scylla_core + api,
'scylla': ['main.cc'] + urchin_core + api,
}
tests_not_using_seastar_test_framework = set([
@@ -488,7 +443,6 @@ tests_not_using_seastar_test_framework = set([
'tests/partitioner_test',
'tests/map_difference_test',
'tests/frozen_mutation_test',
'tests/canonical_mutation_test',
'tests/perf/perf_mutation',
'tests/lsa_async_eviction_test',
'tests/lsa_sync_eviction_test',
@@ -507,24 +461,23 @@ tests_not_using_seastar_test_framework = set([
'tests/crc_test',
'tests/perf/perf_sstable',
'tests/managed_vector_test',
'tests/dynamic_bitset_test',
])
for t in tests_not_using_seastar_test_framework:
if not t in scylla_tests:
raise Exception("Test %s not found in scylla_tests" % (t))
if not t in urchin_tests:
raise Exception("Test %s not found in urchin_tests" % (t))
for t in scylla_tests:
deps[t] = scylla_tests_dependencies + [t + '.cc']
for t in urchin_tests:
deps[t] = urchin_tests_dependencies + [t + '.cc']
if t not in tests_not_using_seastar_test_framework:
deps[t] += scylla_tests_seastar_deps
deps[t] += urchin_tests_seastar_deps
deps['tests/sstable_test'] += ['tests/sstable_datafile_test.cc']
deps['tests/bytes_ostream_test'] = ['tests/bytes_ostream_test.cc']
deps['tests/UUID_test'] = ['utils/UUID_gen.cc', 'tests/UUID_test.cc']
deps['tests/murmur_hash_test'] = ['bytes.cc', 'utils/murmur_hash.cc', 'tests/murmur_hash_test.cc']
deps['tests/allocation_strategy_test'] = ['tests/allocation_strategy_test.cc', 'utils/logalloc.cc', 'log.cc', 'utils/dynamic_bitset.cc']
deps['tests/allocation_strategy_test'] = ['tests/allocation_strategy_test.cc', 'utils/logalloc.cc', 'log.cc']
warnings = [
'-Wno-mismatched-tags', # clang-only
@@ -538,7 +491,6 @@ warnings = [w
warnings = ' '.join(warnings)
dbgflag = debug_flag(args.cxx) if args.debuginfo else ''
tests_link_rule = 'link' if args.tests_debuginfo else 'link_stripped'
if args.so:
args.pie = '-shared'
@@ -550,17 +502,6 @@ else:
args.pie = ''
args.fpie = ''
optional_packages = ['libsystemd']
pkgs = []
for pkg in optional_packages:
if have_pkg(pkg):
pkgs.append(pkg)
upkg = pkg.upper().replace('-', '_')
defines.append('HAVE_{}=1'.format(upkg))
else:
print('Missing optional package {pkg}'.format(**locals()))
defines = ' '.join(['-D' + d for d in defines])
globals().update(vars(args))
@@ -590,10 +531,9 @@ if args.dpdk:
elif args.dpdk_target:
seastar_flags += ['--dpdk-target', args.dpdk_target]
seastar_cflags = args.user_cflags + " -march=nehalem"
seastar_flags += ['--compiler', args.cxx, '--cflags=%s' % (seastar_cflags)]
seastar_flags += ['--compiler', args.cxx, '--cflags=-march=nehalem']
status = subprocess.call([python, './configure.py'] + seastar_flags, cwd = 'seastar')
status = subprocess.call(['./configure.py'] + seastar_flags, cwd = 'seastar')
if status != 0:
print('Seastar configuration failed')
@@ -622,10 +562,7 @@ for mode in build_modes:
seastar_deps = 'practically_anything_can_change_so_lets_run_it_every_time_and_restat.'
args.user_cflags += " " + pkg_config("--cflags", "jsoncpp")
libs = "-lyaml-cpp -llz4 -lz -lsnappy " + pkg_config("--libs", "jsoncpp") + ' -lboost_filesystem' + ' -lcrypt'
for pkg in pkgs:
args.user_cflags += ' ' + pkg_config('--cflags', pkg)
libs += ' ' + pkg_config('--libs', pkg)
libs = "-lyaml-cpp -llz4 -lz -lsnappy " + pkg_config("--libs", "jsoncpp") + ' -lboost_filesystem'
user_cflags = args.user_cflags
user_ldflags = args.user_ldflags
if args.staticcxx:
@@ -728,7 +665,7 @@ with open(buildfile, 'w') as f:
# So we strip the tests by default; The user can very
# quickly re-link the test unstripped by adding a "_g"
# to the test name, e.g., "ninja build/release/testname_g"
f.write('build $builddir/{}/{}: {}.{} {} {}\n'.format(mode, binary, tests_link_rule, mode, str.join(' ', objs),
f.write('build $builddir/{}/{}: link_stripped.{} {} {}\n'.format(mode, binary, mode, str.join(' ', objs),
'seastar/build/{}/libseastar.a'.format(mode)))
if has_thrift:
f.write(' libs = -lthrift -lboost_system $libs\n')
@@ -792,7 +729,7 @@ with open(buildfile, 'w') as f:
f.write('build {}: phony\n'.format(seastar_deps))
f.write(textwrap.dedent('''\
rule configure
command = {python} configure.py $configure_args
command = python3 configure.py $configure_args
generator = 1
build build.ninja: configure | configure.py
rule cscope

View File

@@ -1,119 +0,0 @@
/*
* Copyright (C) 2015 Cloudius Systems, Ltd.
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "mutation_partition_view.hh"
#include "schema.hh"
// Mutation partition visitor which applies visited data into
// existing mutation_partition. The visited data may be of a different schema.
// Data which is not representable in the new schema is dropped.
// Weak exception guarantees.
class converting_mutation_partition_applier : public mutation_partition_visitor {
// Target schema that visited data is converted into.
const schema& _p_schema;
// Target partition that converted data is applied to.
mutation_partition& _p;
// Column mapping of the schema the visited data was written with; used to
// translate visited column ids into column names/types.
const column_mapping& _visited_column_mapping;
// Row most recently announced via accept_row(); accept_row_cell() applies
// cells into it. NOTE(review): relies on the visitor contract that
// accept_row() is called before any accept_row_cell() — confirm with
// mutation_partition_visitor's documentation.
deletable_row* _current_row;
private:
// A visited cell is kept only if the target column has the same kind and a
// value-compatible type.
static bool is_compatible(const column_definition& new_def, const data_type& old_type, column_kind kind) {
return new_def.kind == kind && new_def.type->is_value_compatible_with(*old_type);
}
// Applies an atomic cell into dst, dropping it if incompatible with the
// target column or written at/before the column's drop timestamp.
void accept_cell(row& dst, column_kind kind, const column_definition& new_def, const data_type& old_type, atomic_cell_view cell) {
if (is_compatible(new_def, old_type, kind) && cell.timestamp() > new_def.dropped_at()) {
dst.apply(new_def, atomic_cell_or_collection(cell));
}
}
// Collection overload: filters the collection's tombstone and individual
// cells by the target column's drop timestamp, then re-serializes and
// applies the surviving subset.
void accept_cell(row& dst, column_kind kind, const column_definition& new_def, const data_type& old_type, collection_mutation_view cell) {
if (!is_compatible(new_def, old_type, kind)) {
return;
}
auto&& ctype = static_pointer_cast<const collection_type_impl>(old_type);
auto old_view = ctype->deserialize_mutation_form(cell);
collection_type_impl::mutation_view new_view;
if (old_view.tomb.timestamp > new_def.dropped_at()) {
new_view.tomb = old_view.tomb;
}
for (auto& c : old_view.cells) {
if (c.second.timestamp() > new_def.dropped_at()) {
new_view.cells.emplace_back(std::move(c));
}
}
dst.apply(new_def, ctype->serialize_mutation_form(std::move(new_view)));
}
public:
// visited_column_mapping: column mapping of the source (visited) schema.
// target_schema/target: schema and partition the converted data goes into.
converting_mutation_partition_applier(
const column_mapping& visited_column_mapping,
const schema& target_schema,
mutation_partition& target)
: _p_schema(target_schema)
, _p(target)
, _visited_column_mapping(visited_column_mapping)
{ }
virtual void accept_partition_tombstone(tombstone t) override {
_p.apply(t);
}
// Static cell: look the visited column up by name in the target schema;
// silently dropped if the target schema no longer has that column.
virtual void accept_static_cell(column_id id, atomic_cell_view cell) override {
const column_mapping::column& col = _visited_column_mapping.static_column_at(id);
const column_definition* def = _p_schema.get_column_definition(col.name());
if (def) {
accept_cell(_p._static_row, column_kind::static_column, *def, col.type(), cell);
}
}
virtual void accept_static_cell(column_id id, collection_mutation_view collection) override {
const column_mapping::column& col = _visited_column_mapping.static_column_at(id);
const column_definition* def = _p_schema.get_column_definition(col.name());
if (def) {
accept_cell(_p._static_row, column_kind::static_column, *def, col.type(), collection);
}
}
virtual void accept_row_tombstone(clustering_key_prefix_view prefix, tombstone t) override {
_p.apply_row_tombstone(_p_schema, prefix, t);
}
// Starts a new clustered row in the target partition and remembers it so
// that subsequent accept_row_cell() calls land in it.
virtual void accept_row(clustering_key_view key, tombstone deleted_at, const row_marker& rm) override {
deletable_row& r = _p.clustered_row(_p_schema, key);
r.apply(rm);
r.apply(deleted_at);
_current_row = &r;
}
// Regular cell of the current row; dropped if the target schema no longer
// has the column.
virtual void accept_row_cell(column_id id, atomic_cell_view cell) override {
const column_mapping::column& col = _visited_column_mapping.regular_column_at(id);
const column_definition* def = _p_schema.get_column_definition(col.name());
if (def) {
accept_cell(_current_row->cells(), column_kind::regular_column, *def, col.type(), cell);
}
}
virtual void accept_row_cell(column_id id, collection_mutation_view collection) override {
const column_mapping::column& col = _visited_column_mapping.regular_column_at(id);
const column_definition* def = _p_schema.get_column_definition(col.name());
if (def) {
accept_cell(_current_row->cells(), column_kind::regular_column, *def, col.type(), collection);
}
}
};

View File

@@ -31,7 +31,6 @@ options {
@parser::includes {
#include "cql3/selection/writetime_or_ttl.hh"
#include "cql3/statements/alter_table_statement.hh"
#include "cql3/statements/create_keyspace_statement.hh"
#include "cql3/statements/drop_keyspace_statement.hh"
#include "cql3/statements/create_index_statement.hh"
@@ -270,9 +269,7 @@ cqlStatement returns [shared_ptr<parsed_statement> stmt]
| st12=dropTableStatement { $stmt = st12; }
#if 0
| st13=dropIndexStatement { $stmt = st13; }
#endif
| st14=alterTableStatement { $stmt = st14; }
#if 0
| st15=alterKeyspaceStatement { $stmt = st15; }
| st16=grantStatement { $stmt = st16; }
| st17=revokeStatement { $stmt = st17; }
@@ -771,7 +768,7 @@ alterKeyspaceStatement returns [AlterKeyspaceStatement expr]
: K_ALTER K_KEYSPACE ks=keyspaceName
K_WITH properties[attrs] { $expr = new AlterKeyspaceStatement(ks, attrs); }
;
#endif
/**
* ALTER COLUMN FAMILY <CF> ALTER <column> TYPE <newtype>;
@@ -780,29 +777,27 @@ alterKeyspaceStatement returns [AlterKeyspaceStatement expr]
* ALTER COLUMN FAMILY <CF> WITH <property> = <value>;
* ALTER COLUMN FAMILY <CF> RENAME <column> TO <column>;
*/
alterTableStatement returns [shared_ptr<alter_table_statement> expr]
alterTableStatement returns [AlterTableStatement expr]
@init {
alter_table_statement::type type;
auto props = make_shared<cql3::statements::cf_prop_defs>();;
std::vector<std::pair<shared_ptr<cql3::column_identifier::raw>, shared_ptr<cql3::column_identifier::raw>>> renames;
bool is_static = false;
AlterTableStatement.Type type = null;
CFPropDefs props = new CFPropDefs();
Map<ColumnIdentifier.Raw, ColumnIdentifier.Raw> renames = new HashMap<ColumnIdentifier.Raw, ColumnIdentifier.Raw>();
boolean isStatic = false;
}
: K_ALTER K_COLUMNFAMILY cf=columnFamilyName
( K_ALTER id=cident K_TYPE v=comparatorType { type = alter_table_statement::type::alter; }
| K_ADD id=cident v=comparatorType ({ is_static=true; } K_STATIC)? { type = alter_table_statement::type::add; }
| K_DROP id=cident { type = alter_table_statement::type::drop; }
| K_WITH properties[props] { type = alter_table_statement::type::opts; }
| K_RENAME { type = alter_table_statement::type::rename; }
id1=cident K_TO toId1=cident { renames.emplace_back(id1, toId1); }
( K_AND idn=cident K_TO toIdn=cident { renames.emplace_back(idn, toIdn); } )*
( K_ALTER id=cident K_TYPE v=comparatorType { type = AlterTableStatement.Type.ALTER; }
| K_ADD id=cident v=comparatorType ({ isStatic=true; } K_STATIC)? { type = AlterTableStatement.Type.ADD; }
| K_DROP id=cident { type = AlterTableStatement.Type.DROP; }
| K_WITH properties[props] { type = AlterTableStatement.Type.OPTS; }
| K_RENAME { type = AlterTableStatement.Type.RENAME; }
id1=cident K_TO toId1=cident { renames.put(id1, toId1); }
( K_AND idn=cident K_TO toIdn=cident { renames.put(idn, toIdn); } )*
)
{
$expr = ::make_shared<alter_table_statement>(std::move(cf), type, std::move(id),
std::move(v), std::move(props), std::move(renames), is_static);
$expr = new AlterTableStatement(cf, type, id, v, props, renames, isStatic);
}
;
#if 0
/**
* ALTER TYPE <name> ALTER <field> TYPE <newtype>;
* ALTER TYPE <name> ADD <field> <newtype>;
@@ -861,7 +856,7 @@ dropIndexStatement returns [DropIndexStatement expr]
* TRUNCATE <CF>;
*/
truncateStatement returns [::shared_ptr<truncate_statement> stmt]
: K_TRUNCATE (K_COLUMNFAMILY)? cf=columnFamilyName { $stmt = ::make_shared<truncate_statement>(cf); }
: K_TRUNCATE cf=columnFamilyName { $stmt = ::make_shared<truncate_statement>(cf); }
;
#if 0
@@ -1229,8 +1224,8 @@ properties[::shared_ptr<cql3::statements::property_definitions> props]
;
property[::shared_ptr<cql3::statements::property_definitions> props]
: k=ident '=' simple=propertyValue { try { $props->add_property(k->to_string(), simple); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
| k=ident '=' map=mapLiteral { try { $props->add_property(k->to_string(), convert_property_map(map)); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
: k=ident '=' (simple=propertyValue { try { $props->add_property(k->to_string(), simple); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } }
| map=mapLiteral { try { $props->add_property(k->to_string(), convert_property_map(map)); } catch (exceptions::syntax_exception e) { add_recognition_error(e.what()); } })
;
propertyValue returns [sstring str]
@@ -1248,7 +1243,6 @@ relationType returns [const cql3::operator_type* op = nullptr]
;
relation[std::vector<cql3::relation_ptr>& clauses]
@init{ const cql3::operator_type* rt = nullptr; }
: name=cident type=relationType t=term { $clauses.emplace_back(::make_shared<cql3::single_column_relation>(std::move(name), *type, std::move(t))); }
| K_TOKEN l=tupleOfIdentifiers type=relationType t=term
@@ -1258,9 +1252,11 @@ relation[std::vector<cql3::relation_ptr>& clauses]
{ $clauses.emplace_back(make_shared<cql3::single_column_relation>(std::move(name), cql3::operator_type::IN, std::move(marker))); }
| name=cident K_IN in_values=singleColumnInValues
{ $clauses.emplace_back(cql3::single_column_relation::create_in_relation(std::move(name), std::move(in_values))); }
| name=cident K_CONTAINS { rt = &cql3::operator_type::CONTAINS; } (K_KEY { rt = &cql3::operator_type::CONTAINS_KEY; })?
t=term { $clauses.emplace_back(make_shared<cql3::single_column_relation>(std::move(name), *rt, std::move(t))); }
| name=cident '[' key=term ']' type=relationType t=term { $clauses.emplace_back(make_shared<cql3::single_column_relation>(std::move(name), std::move(key), *type, std::move(t))); }
#if 0
| name=cident K_CONTAINS { Operator rt = Operator.CONTAINS; } (K_KEY { rt = Operator.CONTAINS_KEY; })?
t=term { $clauses.add(new SingleColumnRelation(name, rt, t)); }
| name=cident '[' key=term ']' type=relationType t=term { $clauses.add(new SingleColumnRelation(name, key, type, t)); }
#endif
| ids=tupleOfIdentifiers
( K_IN
( '(' ')'

View File

@@ -80,7 +80,7 @@ int64_t attributes::get_timestamp(int64_t now, const query_options& options) {
} catch (marshal_exception e) {
throw exceptions::invalid_request_exception("Invalid timestamp value");
}
return value_cast<int64_t>(data_type_for<int64_t>()->deserialize(*tval));
return boost::any_cast<int64_t>(data_type_for<int64_t>()->deserialize(*tval));
}
int32_t attributes::get_time_to_live(const query_options& options) {
@@ -99,7 +99,7 @@ int32_t attributes::get_time_to_live(const query_options& options) {
throw exceptions::invalid_request_exception("Invalid TTL value");
}
auto ttl = value_cast<int32_t>(data_type_for<int32_t>()->deserialize(*tval));
auto ttl = boost::any_cast<int32_t>(data_type_for<int32_t>()->deserialize(*tval));
if (ttl < 0) {
throw exceptions::invalid_request_exception("A TTL must be greater or equal to 0");
}

View File

@@ -55,11 +55,14 @@ namespace cql3 {
* Represents an identifier for a CQL column definition.
* TODO : should support light-weight mode without text representation for when not interned
*/
class column_identifier final : public selection::selectable {
class column_identifier final : public selection::selectable /* implements IMeasurableMemory*/ {
public:
bytes bytes_;
private:
sstring _text;
#if 0
private static final long EMPTY_SIZE = ObjectSizes.measure(new ColumnIdentifier("", true));
#endif
public:
column_identifier(sstring raw_text, bool keep_case);
@@ -80,6 +83,20 @@ public:
}
#if 0
public long unsharedHeapSize()
{
return EMPTY_SIZE
+ ObjectSizes.sizeOnHeapOf(bytes)
+ ObjectSizes.sizeOf(text);
}
public long unsharedHeapSizeExcludingData()
{
return EMPTY_SIZE
+ ObjectSizes.sizeOnHeapExcludingData(bytes)
+ ObjectSizes.sizeOf(text);
}
public ColumnIdentifier clone(AbstractAllocator allocator)
{
return new ColumnIdentifier(allocator.clone(bytes), text);

View File

@@ -160,7 +160,7 @@ void constants::deleter::execute(mutation& m, const exploded_clustering_prefix&
auto ctype = static_pointer_cast<const collection_type_impl>(column.type);
m.set_cell(prefix, column, atomic_cell_or_collection::from_collection_mutation(ctype->serialize_mutation_form(coll_m)));
} else {
m.set_cell(prefix, column, make_dead_cell(params));
m.set_cell(prefix, column, params.make_dead_cell());
}
}

View File

@@ -197,7 +197,7 @@ public:
virtual void execute(mutation& m, const exploded_clustering_prefix& prefix, const update_parameters& params) override {
auto value = _t->bind_and_get(params._options);
auto cell = value ? make_cell(*value, params) : make_dead_cell(params);
auto cell = value ? params.make_cell(*value) : params.make_dead_cell();
m.set_cell(prefix, column, std::move(cell));
}
};

View File

@@ -98,10 +98,6 @@ public:
execute_internal(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) = 0;
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const = 0;
virtual bool depends_on_keyspace(const sstring& ks_name) const = 0;
virtual bool depends_on_column_family(const sstring& cf_name) const = 0;
};
}

View File

@@ -90,7 +90,7 @@ public:
if (!values[0]) {
return;
}
_sum += value_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
_sum += boost::any_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
}
};
@@ -132,7 +132,7 @@ public:
return;
}
++_count;
_sum += value_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
_sum += boost::any_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
}
};
@@ -169,7 +169,7 @@ public:
if (!values[0]) {
return;
}
auto val = value_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
auto val = boost::any_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
if (!_max) {
_max = val;
} else {
@@ -216,7 +216,7 @@ public:
if (!values[0]) {
return;
}
auto val = value_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
auto val = boost::any_cast<Type>(data_type_for<Type>()->deserialize(*values[0]));
if (!_min) {
_min = val;
} else {

View File

@@ -50,11 +50,6 @@ functions::init() {
if (type == cql3_type::varchar || type == cql3_type::blob) {
continue;
}
// counters are not supported yet
if (type->is_counter()) {
warn(unimplemented::cause::COUNTERS);
continue;
}
declare(make_to_blob_function(type->get_type()));
declare(make_from_blob_function(type->get_type()));

View File

@@ -71,10 +71,10 @@ make_min_timeuuid_fct() {
return {};
}
auto ts_obj = timestamp_type->deserialize(*bb);
if (ts_obj.is_null()) {
if (ts_obj.empty()) {
return {};
}
auto ts = value_cast<db_clock::time_point>(ts_obj);
auto ts = boost::any_cast<db_clock::time_point>(ts_obj);
auto uuid = utils::UUID_gen::min_time_UUID(ts.time_since_epoch().count());
return {timeuuid_type->decompose(uuid)};
});
@@ -91,10 +91,10 @@ make_max_timeuuid_fct() {
return {};
}
auto ts_obj = timestamp_type->deserialize(*bb);
if (ts_obj.is_null()) {
if (ts_obj.empty()) {
return {};
}
auto ts = value_cast<db_clock::time_point>(ts_obj);
auto ts = boost::any_cast<db_clock::time_point>(ts_obj);
auto uuid = utils::UUID_gen::max_time_UUID(ts.time_since_epoch().count());
return {timeuuid_type->decompose(uuid)};
});

View File

@@ -54,7 +54,7 @@ shared_ptr<function>
make_uuid_fct() {
return make_native_scalar_function<false>("uuid", uuid_type, {},
[] (serialization_format sf, const std::vector<bytes_opt>& parameters) -> bytes_opt {
return {uuid_type->decompose(utils::make_random_uuid())};
return {uuid_type->decompose(boost::any(utils::make_random_uuid()))};
});
}

View File

@@ -113,12 +113,12 @@ lists::value::from_serialized(bytes_view v, list_type type, serialization_format
// Collections have this small hack that validate cannot be called on a serialized object,
// but compose does the validation (so we're fine).
// FIXME: deserializeForNativeProtocol()?!
auto l = value_cast<list_type_impl::native_type>(type->deserialize(v, sf));
auto l = boost::any_cast<list_type_impl::native_type>(type->deserialize(v, sf));
std::vector<bytes_opt> elements;
elements.reserve(l.size());
for (auto&& element : l) {
// elements can be null in lists that represent a set of IN values
elements.push_back(element.is_null() ? bytes_opt() : bytes_opt(type->get_elements_type()->decompose(element)));
elements.push_back(element.empty() ? bytes_opt() : bytes_opt(type->get_elements_type()->decompose(element)));
}
return value(std::move(elements));
} catch (marshal_exception& e) {
@@ -272,9 +272,9 @@ lists::setter_by_index::execute(mutation& m, const exploded_clustering_prefix& p
auto existing_list_opt = params.get_prefetched_list(m.key(), row_key, column);
if (!existing_list_opt) {
throw exceptions::invalid_request_exception("Attempted to set an element on a list which is null");
throw exceptions::invalid_request_exception(sprint("List index %d out of bound, list has size 0", idx));
}
collection_mutation_view existing_list_ser = *existing_list_opt;
collection_mutation::view existing_list_ser = *existing_list_opt;
auto ltype = dynamic_pointer_cast<const list_type_impl>(column.type);
collection_type_impl::mutation_view existing_list = ltype->deserialize_mutation_form(existing_list_ser);
// we verified that index is an int32_type
@@ -339,7 +339,7 @@ lists::do_append(shared_ptr<term> t,
} else {
auto&& to_add = list_value->_elements;
auto deref = [] (const bytes_opt& v) { return *v; };
auto&& newv = collection_mutation{list_type_impl::pack(
auto&& newv = collection_mutation::one{list_type_impl::pack(
boost::make_transform_iterator(to_add.begin(), deref),
boost::make_transform_iterator(to_add.end(), deref),
to_add.size(), serialization_format::internal())};
@@ -448,7 +448,7 @@ lists::discarder_by_index::execute(mutation& m, const exploded_clustering_prefix
auto&& existing_list = params.get_prefetched_list(m.key(), row_key, column);
int32_t idx = read_simple_exactly<int32_t>(*cvalue->_bytes);
if (!existing_list) {
throw exceptions::invalid_request_exception("Attempted to delete an element from a list which is null");
throw exceptions::invalid_request_exception("List does not exist");
}
auto&& deserialized = ltype->deserialize_mutation_form(*existing_list);
if (idx < 0 || size_t(idx) >= deserialized.cells.size()) {

View File

@@ -114,26 +114,30 @@ maps::literal::validate_assignable_to(database& db, const sstring& keyspace, col
assignment_testable::test_result
maps::literal::test_assignment(database& db, const sstring& keyspace, ::shared_ptr<column_specification> receiver) {
if (!dynamic_pointer_cast<const map_type_impl>(receiver->type)) {
return assignment_testable::test_result::NOT_ASSIGNABLE;
}
throw std::runtime_error("not implemented");
#if 0
if (!(receiver.type instanceof MapType))
return AssignmentTestable.TestResult.NOT_ASSIGNABLE;
// If there is no elements, we can't say it's an exact match (an empty map if fundamentally polymorphic).
if (entries.empty()) {
return assignment_testable::test_result::WEAKLY_ASSIGNABLE;
}
auto key_spec = maps::key_spec_of(*receiver);
auto value_spec = maps::value_spec_of(*receiver);
if (entries.isEmpty())
return AssignmentTestable.TestResult.WEAKLY_ASSIGNABLE;
ColumnSpecification keySpec = Maps.keySpecOf(receiver);
ColumnSpecification valueSpec = Maps.valueSpecOf(receiver);
// It's an exact match if all are exact match, but is not assignable as soon as any is non assignable.
auto res = assignment_testable::test_result::EXACT_MATCH;
for (auto entry : entries) {
auto t1 = entry.first->test_assignment(db, keyspace, key_spec);
auto t2 = entry.second->test_assignment(db, keyspace, value_spec);
if (t1 == assignment_testable::test_result::NOT_ASSIGNABLE || t2 == assignment_testable::test_result::NOT_ASSIGNABLE)
return assignment_testable::test_result::NOT_ASSIGNABLE;
if (t1 != assignment_testable::test_result::EXACT_MATCH || t2 != assignment_testable::test_result::EXACT_MATCH)
res = assignment_testable::test_result::WEAKLY_ASSIGNABLE;
AssignmentTestable.TestResult res = AssignmentTestable.TestResult.EXACT_MATCH;
for (Pair<Term.Raw, Term.Raw> entry : entries)
{
AssignmentTestable.TestResult t1 = entry.left.testAssignment(keyspace, keySpec);
AssignmentTestable.TestResult t2 = entry.right.testAssignment(keyspace, valueSpec);
if (t1 == AssignmentTestable.TestResult.NOT_ASSIGNABLE || t2 == AssignmentTestable.TestResult.NOT_ASSIGNABLE)
return AssignmentTestable.TestResult.NOT_ASSIGNABLE;
if (t1 != AssignmentTestable.TestResult.EXACT_MATCH || t2 != AssignmentTestable.TestResult.EXACT_MATCH)
res = AssignmentTestable.TestResult.WEAKLY_ASSIGNABLE;
}
return res;
#endif
}
sstring
@@ -157,7 +161,7 @@ maps::value::from_serialized(bytes_view value, map_type type, serialization_form
// Collections have this small hack that validate cannot be called on a serialized object,
// but compose does the validation (so we're fine).
// FIXME: deserialize_for_native_protocol?!
auto m = value_cast<map_type_impl::native_type>(type->deserialize(value, sf));
auto m = boost::any_cast<map_type_impl::native_type>(type->deserialize(value, sf));
std::map<bytes, bytes, serialized_compare> map(type->get_keys_type()->as_less_comparator());
for (auto&& e : m) {
map.emplace(type->get_keys_type()->decompose(e.first),
@@ -346,8 +350,10 @@ maps::discarder_by_key::execute(mutation& m, const exploded_clustering_prefix& p
if (!key) {
throw exceptions::invalid_request_exception("Invalid null map key");
}
auto ckey = dynamic_pointer_cast<constants::value>(std::move(key));
assert(ckey);
collection_type_impl::mutation mut;
mut.cells.emplace_back(*key->get(params._options), params.make_dead_cell());
mut.cells.emplace_back(*ckey->_bytes, params.make_dead_cell());
auto mtype = static_cast<const map_type_impl*>(column.type.get());
m.set_cell(prefix, column, mtype->serialize_mutation_form(mut));
}

View File

@@ -216,7 +216,7 @@ operation::element_deletion::prepare(database& db, const sstring& keyspace, cons
return make_shared<lists::discarder_by_index>(receiver, std::move(idx));
} else if (&ctype->_kind == &collection_type_impl::kind::set) {
auto&& elt = _element->prepare(db, keyspace, sets::value_spec_of(receiver.column_specification));
return make_shared<sets::element_discarder>(receiver, std::move(elt));
return make_shared<sets::discarder>(receiver, std::move(elt));
} else if (&ctype->_kind == &collection_type_impl::kind::map) {
auto&& key = _element->prepare(db, keyspace, maps::key_spec_of(*receiver.column_specification));
return make_shared<maps::discarder_by_key>(receiver, std::move(key));

View File

@@ -45,7 +45,6 @@
#include "exceptions/exceptions.hh"
#include "database_fwd.hh"
#include "term.hh"
#include "update_parameters.hh"
#include <experimental/optional>
@@ -87,14 +86,6 @@ public:
virtual ~operation() {}
atomic_cell make_dead_cell(const update_parameters& params) const {
return params.make_dead_cell();
}
atomic_cell make_cell(bytes_view value, const update_parameters& params) const {
return params.make_cell(value);
}
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const {
return _t && _t->uses_function(ks_name, function_name);
}
@@ -199,7 +190,13 @@ public:
}
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver);
#if 0
protected String toString(ColumnSpecification column)
{
return String.format("%s[%s] = %s", column.name, selector, value);
}
#endif
virtual bool is_compatible_with(shared_ptr<raw_update> other) override;
};
@@ -212,6 +209,13 @@ public:
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) override;
#if 0
protected String toString(ColumnSpecification column)
{
return String.format("%s = %s + %s", column.name, column.name, value);
}
#endif
virtual bool is_compatible_with(shared_ptr<raw_update> other) override;
};
@@ -224,6 +228,13 @@ public:
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) override;
#if 0
protected String toString(ColumnSpecification column)
{
return String.format("%s = %s - %s", column.name, column.name, value);
}
#endif
virtual bool is_compatible_with(shared_ptr<raw_update> other) override;
};
@@ -236,6 +247,12 @@ public:
virtual shared_ptr<operation> prepare(database& db, const sstring& keyspace, const column_definition& receiver) override;
#if 0
protected String toString(ColumnSpecification column)
{
return String.format("%s = %s - %s", column.name, value, column.name);
}
#endif
virtual bool is_compatible_with(shared_ptr<raw_update> other) override;
};

View File

@@ -99,9 +99,9 @@ query_options::query_options(query_options&& o, std::vector<std::vector<bytes_vi
_batch_options = std::move(tmp);
}
query_options::query_options(db::consistency_level cl, std::vector<bytes_opt> values)
query_options::query_options(std::vector<bytes_opt> values)
: query_options(
cl,
db::consistency_level::ONE,
{},
std::move(values),
{},
@@ -120,11 +120,6 @@ query_options::query_options(db::consistency_level cl, std::vector<bytes_opt> va
}
}
query_options::query_options(std::vector<bytes_opt> values)
: query_options(
db::consistency_level::ONE, std::move(values))
{}
db::consistency_level query_options::get_consistency() const
{
return _consistency;

View File

@@ -112,7 +112,6 @@ public:
// forInternalUse
explicit query_options(std::vector<bytes_opt> values);
explicit query_options(db::consistency_level, std::vector<bytes_opt> values);
db::consistency_level get_consistency() const;
bytes_view_opt get_value_at(size_t idx) const;

View File

@@ -56,15 +56,14 @@ using namespace transport::messages;
logging::logger log("query_processor");
distributed<query_processor> _the_query_processor;
const sstring query_processor::CQL_VERSION = "3.2.0";
class query_processor::internal_state {
service::client_state _cs;
service::query_state _qs;
public:
internal_state()
: _qs(service::client_state{service::client_state::internal_tag()}) {
: _cs(service::client_state::internal_tag()), _qs(_cs) {
}
operator service::query_state&() {
return _qs;
@@ -73,13 +72,14 @@ public:
return _qs;
}
operator service::client_state&() {
return _qs.get_client_state();
return _cs;
}
operator const service::client_state&() const {
return _qs.get_client_state();
return _cs;
}
api::timestamp_type next_timestamp() {
return _qs.get_client_state().get_timestamp();
return _cs.get_timestamp();
}
};
@@ -89,27 +89,15 @@ api::timestamp_type query_processor::next_timestamp() {
query_processor::query_processor(distributed<service::storage_proxy>& proxy,
distributed<database>& db)
: _migration_subscriber{std::make_unique<migration_subscriber>(this)}
, _proxy(proxy)
, _db(db)
, _internal_state(new internal_state())
{
service::get_local_migration_manager().register_listener(_migration_subscriber.get());
: _proxy(proxy), _db(db), _internal_state(new internal_state()) {
}
query_processor::~query_processor()
{}
future<> query_processor::stop()
{
service::get_local_migration_manager().unregister_listener(_migration_subscriber.get());
return make_ready_future<>();
}
future<::shared_ptr<result_message>>
query_processor::process(const sstring_view& query_string, service::query_state& query_state, query_options& options)
{
log.trace("process: \"{}\"", query_string);
auto p = get_statement(query_string, query_state.get_client_state());
options.prepare(p->bound_names);
auto cql_statement = p->statement;
@@ -179,7 +167,7 @@ query_processor::prepare(const std::experimental::string_view& query_string, con
query_processor::get_stored_prepared_statement(const std::experimental::string_view& query_string, const sstring& keyspace, bool for_thrift)
{
if (for_thrift) {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
#if 0
Integer thriftStatementId = computeThriftId(queryString, keyspace);
ParsedStatement.Prepared existing = thriftPreparedStatements.get(thriftStatementId);
@@ -210,7 +198,7 @@ query_processor::store_prepared_statement(const std::experimental::string_view&
MAX_CACHE_PREPARED_MEMORY));
#endif
if (for_thrift) {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
#if 0
Integer statementId = computeThriftId(queryString, keyspace);
thriftPreparedStatements.put(statementId, prepared);
@@ -224,11 +212,6 @@ query_processor::store_prepared_statement(const std::experimental::string_view&
}
}
void query_processor::invalidate_prepared_statement(bytes statement_id)
{
_prepared_statements.erase(statement_id);
}
static bytes md5_calculate(const std::experimental::string_view& s)
{
constexpr size_t size = CryptoPP::Weak1::MD5::DIGESTSIZE;
@@ -300,9 +283,8 @@ query_processor::parse_statement(const sstring_view& query)
}
query_options query_processor::make_internal_options(
::shared_ptr<statements::parsed_statement::prepared> p,
const std::initializer_list<data_value>& values,
db::consistency_level cl) {
::shared_ptr<statements::parsed_statement::prepared> p,
const std::initializer_list<boost::any>& values) {
if (p->bound_names.size() != values.size()) {
throw std::invalid_argument(sprint("Invalid number of values. Expecting %d but got %d", p->bound_names.size(), values.size()));
}
@@ -310,20 +292,21 @@ query_options query_processor::make_internal_options(
std::vector<bytes_opt> bound_values;
for (auto& v : values) {
auto& n = *ni++;
if (v.type() == bytes_type) {
bound_values.push_back({value_cast<bytes>(v)});
} else if (v.is_null()) {
if (v.type() == typeid(bytes)) {
bound_values.push_back({boost::any_cast<bytes>(v)});
} else if (v.empty()) {
bound_values.push_back({});
} else {
bound_values.push_back({n->type->decompose(v)});
}
}
return query_options(cl, bound_values);
return query_options(bound_values);
}
::shared_ptr<statements::parsed_statement::prepared> query_processor::prepare_internal(
const sstring& query_string) {
auto& p = _internal_statements[query_string];
const std::experimental::string_view& query_string) {
auto& p = _internal_statements[sstring(query_string.begin(), query_string.end())];
if (p == nullptr) {
auto np = parse_statement(query_string)->prepare(_db.local());
np->statement->validate(_proxy, *_internal_state);
@@ -333,54 +316,19 @@ query_options query_processor::make_internal_options(
}
future<::shared_ptr<untyped_result_set>> query_processor::execute_internal(
const sstring& query_string,
const std::initializer_list<data_value>& values) {
if (log.is_enabled(logging::log_level::trace)) {
log.trace("execute_internal: \"{}\" ({})", query_string, ::join(", ", values));
}
const std::experimental::string_view& query_string,
const std::initializer_list<boost::any>& values) {
auto p = prepare_internal(query_string);
return execute_internal(p, values);
}
future<::shared_ptr<untyped_result_set>> query_processor::execute_internal(
::shared_ptr<statements::parsed_statement::prepared> p,
const std::initializer_list<data_value>& values) {
auto opts = make_internal_options(p, values);
return do_with(std::move(opts),
[this, p = std::move(p)](query_options & opts) {
return p->statement->execute_internal(_proxy, *_internal_state, opts).then(
[p](::shared_ptr<transport::messages::result_message> msg) {
[](::shared_ptr<transport::messages::result_message> msg) {
return make_ready_future<::shared_ptr<untyped_result_set>>(::make_shared<untyped_result_set>(msg));
});
});
}
future<::shared_ptr<untyped_result_set>> query_processor::process(
const sstring& query_string,
db::consistency_level cl, const std::initializer_list<data_value>& values, bool cache)
{
auto p = cache ? prepare_internal(query_string) : parse_statement(query_string)->prepare(_db.local());
if (!cache) {
p->statement->validate(_proxy, *_internal_state);
}
return process(p, cl, values);
}
future<::shared_ptr<untyped_result_set>> query_processor::process(
::shared_ptr<statements::parsed_statement::prepared> p,
db::consistency_level cl, const std::initializer_list<data_value>& values)
{
auto opts = make_internal_options(p, values, cl);
return do_with(std::move(opts),
[this, p = std::move(p)](query_options & opts) {
return p->statement->execute(_proxy, *_internal_state, opts).then(
[p](::shared_ptr<transport::messages::result_message> msg) {
return make_ready_future<::shared_ptr<untyped_result_set>>(::make_shared<untyped_result_set>(msg));
});
});
}
future<::shared_ptr<transport::messages::result_message>>
query_processor::process_batch(::shared_ptr<statements::batch_statement> batch, service::query_state& query_state, query_options& options) {
auto& client_state = query_state.get_client_state();
@@ -390,99 +338,5 @@ query_processor::process_batch(::shared_ptr<statements::batch_statement> batch,
return batch->execute(_proxy, query_state, options);
}
query_processor::migration_subscriber::migration_subscriber(query_processor* qp)
: _qp{qp}
{
}
void query_processor::migration_subscriber::on_create_keyspace(const sstring& ks_name)
{
}
void query_processor::migration_subscriber::on_create_column_family(const sstring& ks_name, const sstring& cf_name)
{
}
void query_processor::migration_subscriber::on_create_user_type(const sstring& ks_name, const sstring& type_name)
{
}
void query_processor::migration_subscriber::on_create_function(const sstring& ks_name, const sstring& function_name)
{
log.warn("{} event ignored", __func__);
}
void query_processor::migration_subscriber::on_create_aggregate(const sstring& ks_name, const sstring& aggregate_name)
{
log.warn("{} event ignored", __func__);
}
void query_processor::migration_subscriber::on_update_keyspace(const sstring& ks_name)
{
}
void query_processor::migration_subscriber::on_update_column_family(const sstring& ks_name, const sstring& cf_name, bool columns_changed)
{
if (columns_changed) {
log.info("Column definitions for {}.{} changed, invalidating related prepared statements", ks_name, cf_name);
remove_invalid_prepared_statements(ks_name, cf_name);
}
}
void query_processor::migration_subscriber::on_update_user_type(const sstring& ks_name, const sstring& type_name)
{
}
void query_processor::migration_subscriber::on_update_function(const sstring& ks_name, const sstring& function_name)
{
}
void query_processor::migration_subscriber::on_update_aggregate(const sstring& ks_name, const sstring& aggregate_name)
{
}
void query_processor::migration_subscriber::on_drop_keyspace(const sstring& ks_name)
{
remove_invalid_prepared_statements(ks_name, std::experimental::nullopt);
}
void query_processor::migration_subscriber::on_drop_column_family(const sstring& ks_name, const sstring& cf_name)
{
remove_invalid_prepared_statements(ks_name, cf_name);
}
void query_processor::migration_subscriber::on_drop_user_type(const sstring& ks_name, const sstring& type_name)
{
}
void query_processor::migration_subscriber::on_drop_function(const sstring& ks_name, const sstring& function_name)
{
log.warn("{} event ignored", __func__);
}
void query_processor::migration_subscriber::on_drop_aggregate(const sstring& ks_name, const sstring& aggregate_name)
{
log.warn("{} event ignored", __func__);
}
void query_processor::migration_subscriber::remove_invalid_prepared_statements(sstring ks_name, std::experimental::optional<sstring> cf_name)
{
std::vector<bytes> invalid;
for (auto& kv : _qp->_prepared_statements) {
auto id = kv.first;
auto stmt = kv.second;
if (should_invalidate(ks_name, cf_name, stmt->statement)) {
invalid.emplace_back(id);
}
}
for (auto& id : invalid) {
_qp->invalidate_prepared_statement(id);
}
}
bool query_processor::migration_subscriber::should_invalidate(sstring ks_name, std::experimental::optional<sstring> cf_name, ::shared_ptr<cql_statement> statement)
{
return statement->depends_on_keyspace(ks_name) && (!cf_name || statement->depends_on_column_family(*cf_name));
}
}

View File

@@ -48,7 +48,6 @@
#include "exceptions/exceptions.hh"
#include "cql3/query_options.hh"
#include "cql3/statements/cf_statement.hh"
#include "service/migration_manager.hh"
#include "service/query_state.hh"
#include "log.hh"
#include "core/distributed.hh"
@@ -62,10 +61,7 @@ class batch_statement;
}
class query_processor {
public:
class migration_subscriber;
private:
std::unique_ptr<migration_subscriber> _migration_subscriber;
distributed<service::storage_proxy>& _proxy;
distributed<database>& _db;
@@ -322,24 +318,13 @@ public:
}
#endif
private:
query_options make_internal_options(::shared_ptr<statements::parsed_statement::prepared>, const std::initializer_list<data_value>&, db::consistency_level = db::consistency_level::ONE);
::shared_ptr<statements::parsed_statement::prepared> prepare_internal(const std::experimental::string_view& query);
query_options make_internal_options(::shared_ptr<statements::parsed_statement::prepared>, const std::initializer_list<boost::any>&);
public:
future<::shared_ptr<untyped_result_set>> execute_internal(
const sstring& query_string,
const std::initializer_list<data_value>& = { });
::shared_ptr<statements::parsed_statement::prepared> prepare_internal(const sstring& query);
future<::shared_ptr<untyped_result_set>> execute_internal(
::shared_ptr<statements::parsed_statement::prepared>,
const std::initializer_list<data_value>& = { });
future<::shared_ptr<untyped_result_set>> process(
const sstring& query_string,
db::consistency_level, const std::initializer_list<data_value>& = { }, bool cache = false);
future<::shared_ptr<untyped_result_set>> process(
::shared_ptr<statements::parsed_statement::prepared>,
db::consistency_level, const std::initializer_list<data_value>& = { });
const std::experimental::string_view& query_string,
const std::initializer_list<boost::any>& = { });
/*
* This function provides a timestamp that is guaranteed to be higher than any timestamp
@@ -438,8 +423,6 @@ private:
future<::shared_ptr<transport::messages::result_message::prepared>>
store_prepared_statement(const std::experimental::string_view& query_string, const sstring& keyspace, ::shared_ptr<statements::parsed_statement::prepared> prepared, bool for_thrift);
void invalidate_prepared_statement(bytes statement_id);
#if 0
public ResultMessage processPrepared(CQLStatement statement, QueryState queryState, QueryOptions options)
throws RequestExecutionException, RequestValidationException
@@ -478,48 +461,111 @@ public:
{
return meter.measureDeep(key);
}
private static class MigrationSubscriber implements IMigrationListener
{
private void removeInvalidPreparedStatements(String ksName, String cfName)
{
removeInvalidPreparedStatements(preparedStatements.values().iterator(), ksName, cfName);
removeInvalidPreparedStatements(thriftPreparedStatements.values().iterator(), ksName, cfName);
}
private void removeInvalidPreparedStatements(Iterator<ParsedStatement.Prepared> iterator, String ksName, String cfName)
{
while (iterator.hasNext())
{
if (shouldInvalidate(ksName, cfName, iterator.next().statement))
iterator.remove();
}
}
private boolean shouldInvalidate(String ksName, String cfName, CQLStatement statement)
{
String statementKsName;
String statementCfName;
if (statement instanceof ModificationStatement)
{
ModificationStatement modificationStatement = ((ModificationStatement) statement);
statementKsName = modificationStatement.keyspace();
statementCfName = modificationStatement.columnFamily();
}
else if (statement instanceof SelectStatement)
{
SelectStatement selectStatement = ((SelectStatement) statement);
statementKsName = selectStatement.keyspace();
statementCfName = selectStatement.columnFamily();
}
else
{
return false;
}
return ksName.equals(statementKsName) && (cfName == null || cfName.equals(statementCfName));
}
public void onCreateKeyspace(String ksName) { }
public void onCreateColumnFamily(String ksName, String cfName) { }
public void onCreateUserType(String ksName, String typeName) { }
public void onCreateFunction(String ksName, String functionName) {
if (Functions.getOverloadCount(new FunctionName(ksName, functionName)) > 1)
{
// in case there are other overloads, we have to remove all overloads since argument type
// matching may change (due to type casting)
removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, functionName);
removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, functionName);
}
}
public void onCreateAggregate(String ksName, String aggregateName) {
if (Functions.getOverloadCount(new FunctionName(ksName, aggregateName)) > 1)
{
// in case there are other overloads, we have to remove all overloads since argument type
// matching may change (due to type casting)
removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, aggregateName);
removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, aggregateName);
}
}
public void onUpdateKeyspace(String ksName) { }
public void onUpdateColumnFamily(String ksName, String cfName) { }
public void onUpdateUserType(String ksName, String typeName) { }
public void onUpdateFunction(String ksName, String functionName) { }
public void onUpdateAggregate(String ksName, String aggregateName) { }
public void onDropKeyspace(String ksName)
{
removeInvalidPreparedStatements(ksName, null);
}
public void onDropColumnFamily(String ksName, String cfName)
{
removeInvalidPreparedStatements(ksName, cfName);
}
public void onDropUserType(String ksName, String typeName) { }
public void onDropFunction(String ksName, String functionName) {
removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, functionName);
removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, functionName);
}
public void onDropAggregate(String ksName, String aggregateName)
{
removeInvalidPreparedStatementsForFunction(preparedStatements.values().iterator(), ksName, aggregateName);
removeInvalidPreparedStatementsForFunction(thriftPreparedStatements.values().iterator(), ksName, aggregateName);
}
private void removeInvalidPreparedStatementsForFunction(Iterator<ParsedStatement.Prepared> iterator,
String ksName, String functionName)
{
while (iterator.hasNext())
if (iterator.next().statement.usesFunction(ksName, functionName))
iterator.remove();
}
}
#endif
public:
future<> stop();
friend class migration_subscriber;
future<> stop() {
return make_ready_future<>();
}
};
class query_processor::migration_subscriber : public service::migration_listener {
query_processor* _qp;
public:
migration_subscriber(query_processor* qp);
virtual void on_create_keyspace(const sstring& ks_name) override;
virtual void on_create_column_family(const sstring& ks_name, const sstring& cf_name) override;
virtual void on_create_user_type(const sstring& ks_name, const sstring& type_name) override;
virtual void on_create_function(const sstring& ks_name, const sstring& function_name) override;
virtual void on_create_aggregate(const sstring& ks_name, const sstring& aggregate_name) override;
virtual void on_update_keyspace(const sstring& ks_name) override;
virtual void on_update_column_family(const sstring& ks_name, const sstring& cf_name, bool columns_changed) override;
virtual void on_update_user_type(const sstring& ks_name, const sstring& type_name) override;
virtual void on_update_function(const sstring& ks_name, const sstring& function_name) override;
virtual void on_update_aggregate(const sstring& ks_name, const sstring& aggregate_name) override;
virtual void on_drop_keyspace(const sstring& ks_name) override;
virtual void on_drop_column_family(const sstring& ks_name, const sstring& cf_name) override;
virtual void on_drop_user_type(const sstring& ks_name, const sstring& type_name) override;
virtual void on_drop_function(const sstring& ks_name, const sstring& function_name) override;
virtual void on_drop_aggregate(const sstring& ks_name, const sstring& aggregate_name) override;
private:
void remove_invalid_prepared_statements(sstring ks_name, std::experimental::optional<sstring> cf_name);
bool should_invalidate(sstring ks_name, std::experimental::optional<sstring> cf_name, ::shared_ptr<cql_statement> statement);
};
extern distributed<query_processor> _the_query_processor;
inline distributed<query_processor>& get_query_processor() {
return _the_query_processor;
}
inline query_processor& get_local_query_processor() {
return _the_query_processor.local();
}
}

View File

@@ -374,7 +374,7 @@ public:
}
virtual std::vector<bytes_opt> bounds(statements::bound b, const query_options& options) const override {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
#if 0
return Composites.toByteBuffers(boundsAsComposites(b, options));
#endif

View File

@@ -41,13 +41,13 @@ public:
::shared_ptr<primary_key_restrictions<T>> do_merge_to(schema_ptr schema, ::shared_ptr<restriction> restriction) const {
if (restriction->is_multi_column()) {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
}
return ::make_shared<single_column_primary_key_restrictions<T>>(schema)->merge_to(schema, restriction);
}
::shared_ptr<primary_key_restrictions<T>> merge_to(schema_ptr schema, ::shared_ptr<restriction> restriction) override {
if (restriction->is_multi_column()) {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
}
if (restriction->is_on_token()) {
return static_pointer_cast<token_restriction>(restriction);

View File

@@ -80,7 +80,7 @@ public:
private:
const uint32_t _column_count;
::shared_ptr<const service::pager::paging_state> _paging_state;
::shared_ptr<service::pager::paging_state> _paging_state;
public:
metadata(std::vector<::shared_ptr<column_specification>> names_)
@@ -88,7 +88,7 @@ public:
{ }
metadata(flag_enum_set flags, std::vector<::shared_ptr<column_specification>> names_, uint32_t column_count,
::shared_ptr<const service::pager::paging_state> paging_state)
::shared_ptr<service::pager::paging_state> paging_state)
: _flags(flags)
, names(std::move(names_))
, _column_count(column_count)
@@ -121,7 +121,7 @@ private:
}
public:
void set_has_more_pages(::shared_ptr<const service::pager::paging_state> paging_state) {
void set_has_more_pages(::shared_ptr<service::pager::paging_state> paging_state) {
if (!paging_state) {
return;
}
@@ -342,10 +342,6 @@ public:
std::sort(_rows.begin(), _rows.end(), std::forward<RowComparator>(cmp));
}
metadata& get_metadata() {
return *_metadata;
}
const metadata& get_metadata() const {
return *_metadata;
}

View File

@@ -125,7 +125,7 @@ protected:
}
};
std::unique_ptr<selectors> new_selectors() const override {
std::unique_ptr<selectors> new_selectors() {
return std::make_unique<simple_selectors>();
}
};
@@ -196,7 +196,7 @@ protected:
}
};
std::unique_ptr<selectors> new_selectors() const override {
std::unique_ptr<selectors> new_selectors() {
return std::make_unique<selectors_with_processing>(_factories);
}
};
@@ -252,7 +252,7 @@ selection::collect_metadata(schema_ptr schema, const std::vector<::shared_ptr<ra
return r;
}
result_set_builder::result_set_builder(const selection& s, db_clock::time_point now, serialization_format sf)
result_set_builder::result_set_builder(selection& s, db_clock::time_point now, serialization_format sf)
: _result_set(std::make_unique<result_set>(::make_shared<metadata>(*(s.get_result_metadata()))))
, _selectors(s.new_selectors())
, _now(now)
@@ -295,7 +295,7 @@ void result_set_builder::add(const column_definition& def, const query::result_a
}
}
void result_set_builder::add(const column_definition& def, collection_mutation_view c) {
void result_set_builder::add(const column_definition& def, collection_mutation::view c) {
auto&& ctype = static_cast<const collection_type_impl*>(def.type.get());
current->emplace_back(ctype->to_value(c, _serialization_format));
// timestamps, ttls meaningless for collections
@@ -330,98 +330,6 @@ std::unique_ptr<result_set> result_set_builder::build() {
return std::move(_result_set);
}
// Visitor translating query-layer partitions/rows into CQL result rows via
// result_set_builder. Implements the ResultVisitor concept from query.hh.
result_set_builder::visitor::visitor(
cql3::selection::result_set_builder& builder, const schema& s,
const selection& selection)
: _builder(builder), _schema(s), _selection(selection), _row_count(0) {
}
// Appends the next cell from iterator 'i' to the builder, emitting an empty
// value when the cell is absent. Multi-cell (collection) columns are read as
// collection cells, everything else as atomic cells.
void result_set_builder::visitor::add_value(const column_definition& def,
query::result_row_view::iterator_type& i) {
if (def.type->is_multi_cell()) {
auto cell = i.next_collection_cell();
if (!cell) {
_builder.add_empty();
return;
}
_builder.add(def, *cell);
} else {
auto cell = i.next_atomic_cell();
if (!cell) {
_builder.add_empty();
return;
}
_builder.add(def, *cell);
}
}
// Starts a new partition: remembers the exploded partition key components and
// the row count reported for the partition.
void result_set_builder::visitor::accept_new_partition(const partition_key& key,
uint32_t row_count) {
_partition_key = key.explode(_schema);
_row_count = row_count;
}
// Keyless overload — used when the partition key is not part of the result.
void result_set_builder::visitor::accept_new_partition(uint32_t row_count) {
_row_count = row_count;
}
// Starts a new row keyed by 'key': remembers the exploded clustering key and
// delegates cell extraction to the keyless overload.
void result_set_builder::visitor::accept_new_row(const clustering_key& key,
const query::result_row_view& static_row,
const query::result_row_view& row) {
_clustering_key = key.explode(_schema);
accept_new_row(static_row, row);
}
// Emits one result row: each selected column is sourced from the partition
// key, clustering key, regular/compact row cells, or static row cells,
// according to the column kind.
void result_set_builder::visitor::accept_new_row(
const query::result_row_view& static_row,
const query::result_row_view& row) {
auto static_row_iterator = static_row.iterator();
auto row_iterator = row.iterator();
_builder.new_row();
for (auto&& def : _selection.get_columns()) {
switch (def->kind) {
case column_kind::partition_key:
_builder.add(_partition_key[def->component_index()]);
break;
case column_kind::clustering_key:
// A prefix clustering key may carry fewer components than the
// column's index — emit a null value in that case.
if (_clustering_key.size() > def->component_index()) {
_builder.add(_clustering_key[def->component_index()]);
} else {
_builder.add({});
}
break;
case column_kind::regular_column:
add_value(*def, row_iterator);
break;
case column_kind::compact_column:
add_value(*def, row_iterator);
break;
case column_kind::static_column:
add_value(*def, static_row_iterator);
break;
default:
assert(0);
}
}
}
// Called when a partition ends. A partition with zero rows still yields one
// result row exposing the partition key and static columns, with empty
// values for everything else.
void result_set_builder::visitor::accept_partition_end(
const query::result_row_view& static_row) {
if (_row_count == 0) {
_builder.new_row();
auto static_row_iterator = static_row.iterator();
for (auto&& def : _selection.get_columns()) {
if (def->is_partition_key()) {
_builder.add(_partition_key[def->component_index()]);
} else if (def->is_static()) {
add_value(*def, static_row_iterator);
} else {
_builder.add_empty();
}
}
}
}
// Returns the write timestamp recorded for the idx-th selected cell of the
// current row (no bounds check — idx must be valid).
api::timestamp_type result_set_builder::timestamp_of(size_t idx) {
return _timestamps[idx];
}

View File

@@ -134,7 +134,7 @@ public:
* @return <code>true</code> if this selection contains a collection, <code>false</code> otherwise.
*/
bool contains_a_collection() const {
if (!_schema->has_multi_cell_collections()) {
if (!_schema->has_collections()) {
return false;
}
@@ -161,7 +161,7 @@ public:
return std::find(_columns.begin(), _columns.end(), &def) != _columns.end();
}
::shared_ptr<metadata> get_result_metadata() const {
::shared_ptr<metadata> get_result_metadata() {
return _metadata;
}
@@ -186,16 +186,16 @@ private:
public:
static ::shared_ptr<selection> from_selectors(database& db, schema_ptr schema, const std::vector<::shared_ptr<raw_selector>>& raw_selectors);
virtual std::unique_ptr<selectors> new_selectors() const = 0;
virtual std::unique_ptr<selectors> new_selectors() = 0;
/**
* Returns a range of CQL3 columns this selection needs.
*/
auto const& get_columns() const {
auto const& get_columns() {
return _columns;
}
uint32_t get_column_count() const {
uint32_t get_column_count() {
return _columns.size();
}
@@ -238,39 +238,15 @@ private:
const db_clock::time_point _now;
serialization_format _serialization_format;
public:
result_set_builder(const selection& s, db_clock::time_point now, serialization_format sf);
result_set_builder(selection& s, db_clock::time_point now, serialization_format sf);
void add_empty();
void add(bytes_opt value);
void add(const column_definition& def, const query::result_atomic_cell_view& c);
void add(const column_definition& def, collection_mutation_view c);
void add(const column_definition& def, collection_mutation::view c);
void new_row();
std::unique_ptr<result_set> build();
api::timestamp_type timestamp_of(size_t idx);
int32_t ttl_of(size_t idx);
// Implements ResultVisitor concept from query.hh
class visitor {
protected:
result_set_builder& _builder;
const schema& _schema;
const selection& _selection;
uint32_t _row_count;
std::vector<bytes> _partition_key;
std::vector<bytes> _clustering_key;
public:
visitor(cql3::selection::result_set_builder& builder, const schema& s, const selection&);
visitor(visitor&&) = default;
void add_value(const column_definition& def, query::result_row_view::iterator_type& i);
void accept_new_partition(const partition_key& key, uint32_t row_count);
void accept_new_partition(uint32_t row_count);
void accept_new_row(const clustering_key& key,
const query::result_row_view& static_row,
const query::result_row_view& row);
void accept_new_row(const query::result_row_view& static_row,
const query::result_row_view& row);
void accept_partition_end(const query::result_row_view& static_row);
};
private:
bytes_opt get_value(data_type t, query::result_atomic_cell_view c);
};

View File

@@ -125,7 +125,7 @@ sets::value::from_serialized(bytes_view v, set_type type, serialization_format s
// Collections have this small hack that validate cannot be called on a serialized object,
// but compose does the validation (so we're fine).
// FIXME: deserializeForNativeProtocol?!
auto s = value_cast<set_type_impl::native_type>(type->deserialize(v, sf));
auto s = boost::any_cast<set_type_impl::native_type>(type->deserialize(v, sf));
std::set<bytes, serialized_compare> elements(type->get_elements_type()->as_less_comparator());
for (auto&& element : s) {
elements.insert(elements.end(), type->get_elements_type()->decompose(element));
@@ -284,11 +284,16 @@ sets::discarder::execute(mutation& m, const exploded_clustering_prefix& row_key,
auto kill = [&] (bytes idx) {
mut.cells.push_back({std::move(idx), params.make_dead_cell()});
};
auto svalue = dynamic_pointer_cast<sets::value>(value);
assert(svalue);
mut.cells.reserve(svalue->_elements.size());
for (auto&& e : svalue->_elements) {
kill(e);
// This can be either a set or a single element
auto cvalue = dynamic_pointer_cast<constants::value>(value);
if (cvalue) {
kill(cvalue->_bytes ? *cvalue->_bytes : bytes());
} else {
auto svalue = static_pointer_cast<sets::value>(value);
mut.cells.reserve(svalue->_elements.size());
for (auto&& e : svalue->_elements) {
kill(e);
}
}
auto ctype = static_pointer_cast<const collection_type_impl>(column.type);
m.set_cell(row_key, column,
@@ -296,17 +301,4 @@ sets::discarder::execute(mutation& m, const exploded_clustering_prefix& row_key,
ctype->serialize_mutation_form(mut)));
}
// Removes a single element from a non-frozen set column: binds the element
// term and writes a dead cell (tombstone) keyed by the element's serialized
// value into a collection mutation applied to the row.
void sets::element_discarder::execute(mutation& m, const exploded_clustering_prefix& row_key, const update_parameters& params)
{
assert(column.type->is_multi_cell() && "Attempted to remove items from a frozen set");
auto elt = _t->bind(params._options);
if (!elt) {
throw exceptions::invalid_request_exception("Invalid null set element");
}
collection_type_impl::mutation mut;
// A dead cell under the element's key deletes exactly that element.
mut.cells.emplace_back(*elt->get(params._options), params.make_dead_cell());
auto ctype = static_pointer_cast<const collection_type_impl>(column.type);
m.set_cell(row_key, column, ctype->serialize_mutation_form(mut));
}
}

View File

@@ -133,13 +133,6 @@ public:
}
virtual void execute(mutation& m, const exploded_clustering_prefix& row_key, const update_parameters& params) override;
};
// Operation removing a single element from a set column; '_t' (stored by the
// operation base) is the term producing the element to delete.
class element_discarder : public operation {
public:
element_discarder(const column_definition& column, shared_ptr<term> t)
: operation(column, std::move(t)) { }
virtual void execute(mutation& m, const exploded_clustering_prefix& row_key, const update_parameters& params) override;
};
};
}

View File

@@ -159,7 +159,7 @@ protected:
virtual shared_ptr<restrictions::restriction> new_contains_restriction(database& db, schema_ptr schema,
::shared_ptr<variable_specifications> bound_names,
bool is_key) override {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw std::runtime_error("not implemented");
#if 0
ColumnDefinition columnDef = toColumnDefinition(schema, entity);
Term term = toTerm(toReceivers(schema, columnDef), value, schema.ksName, bound_names);

View File

@@ -1,278 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2015 ScyllaDB
*
* Modified by ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cql3/statements/alter_table_statement.hh"
#include "service/migration_manager.hh"
#include "validation.hh"
#include "db/config.hh"
namespace cql3 {
namespace statements {
// ALTER TABLE statement implementation. The constructor only stores the
// parsed pieces; all validation is deferred to announce_migration().
alter_table_statement::alter_table_statement(shared_ptr<cf_name> name,
type t,
shared_ptr<column_identifier::raw> column_name,
shared_ptr<cql3_type::raw> validator,
shared_ptr<cf_prop_defs> properties,
renames_type renames,
bool is_static)
: schema_altering_statement(std::move(name))
, _type(t)
, _raw_column_name(std::move(column_name))
, _validator(std::move(validator))
, _properties(std::move(properties))
, _renames(std::move(renames))
, _is_static(is_static)
{
}
// Permission checking is not implemented yet — only emits a warning. The
// disabled code shows the intended Cassandra-equivalent ALTER permission check.
void alter_table_statement::check_access(const service::client_state& state)
{
warn(unimplemented::cause::PERMISSIONS);
#if 0
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.ALTER);
#endif
}
void alter_table_statement::validate(distributed<service::storage_proxy>& proxy, const service::client_state& state)
{
// validated in announce_migration()
}
// Feature name used for the experimental-features configuration gate below.
static const sstring ALTER_TABLE_FEATURE = "ALTER TABLE";
// Validates the requested alteration against the current schema and, when
// legal, announces the updated column family through the migration manager.
// Resolves to true once the change has been announced. Throws
// invalid_request_exception / configuration_exception on invalid requests.
future<bool> alter_table_statement::announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only)
{
auto& db = proxy.local().get_db().local();
// ALTER TABLE is gated behind the experimental-features configuration flag.
db.get_config().check_experimental(ALTER_TABLE_FEATURE);
auto schema = validation::validate_column_family(db, keyspace(), column_family());
auto cfm = schema_builder(schema);
shared_ptr<cql3_type> validator;
if (_validator) {
validator = _validator->prepare(db, keyspace());
}
shared_ptr<column_identifier> column_name;
const column_definition* def = nullptr;
if (_raw_column_name) {
column_name = _raw_column_name->prepare_column_identifier(schema);
def = get_column_definition(schema, *column_name);
}
switch (_type) {
case alter_table_statement::type::add:
// ALTER TABLE ... ADD <column> <type> [STATIC]
{
assert(column_name);
if (schema->is_dense()) {
throw exceptions::invalid_request_exception("Cannot add new column to a COMPACT STORAGE table");
}
if (_is_static) {
if (!schema->is_compound()) {
throw exceptions::invalid_request_exception("Static columns are not allowed in COMPACT STORAGE tables");
}
if (!schema->clustering_key_size()) {
throw exceptions::invalid_request_exception("Static columns are only useful (and thus allowed) if the table has at least one clustering column");
}
}
if (def) {
if (def->is_partition_key()) {
throw exceptions::invalid_request_exception(sprint("Invalid column name %s because it conflicts with a PRIMARY KEY part", column_name));
} else {
throw exceptions::invalid_request_exception(sprint("Invalid column name %s because it conflicts with an existing column", column_name));
}
}
// Cannot re-add a dropped counter column. See #7831.
if (schema->is_counter() && schema->dropped_columns().count(column_name->text())) {
throw exceptions::invalid_request_exception(sprint("Cannot re-add previously dropped counter column %s", column_name));
}
auto type = validator->get_type();
if (type->is_collection() && type->is_multi_cell()) {
// Non-frozen collections require a composite comparator and are
// not supported on super column families.
if (!schema->is_compound()) {
throw exceptions::invalid_request_exception("Cannot use non-frozen collections with a non-composite PRIMARY KEY");
}
if (schema->is_super()) {
throw exceptions::invalid_request_exception("Cannot use non-frozen collections with super column families");
}
}
cfm.with_column(column_name->name(), type, _is_static ? column_kind::static_column : column_kind::regular_column);
break;
}
case alter_table_statement::type::alter:
// ALTER TABLE ... ALTER <column> TYPE <type>
{
assert(column_name);
if (!def) {
throw exceptions::invalid_request_exception(sprint("Column %s was not found in table %s", column_name, column_family()));
}
auto type = validator->get_type();
switch (def->kind) {
case column_kind::partition_key:
if (type->is_counter()) {
throw exceptions::invalid_request_exception(sprint("counter type is not supported for PRIMARY KEY part %s", column_name));
}
if (!type->is_value_compatible_with(*def->type)) {
throw exceptions::configuration_exception(sprint("Cannot change %s from type %s to type %s: types are incompatible.",
column_name,
def->type->as_cql3_type(),
validator));
}
break;
case column_kind::clustering_key:
if (!schema->is_cql3_table()) {
throw exceptions::invalid_request_exception(sprint("Cannot alter clustering column %s in a non-CQL3 table", column_name));
}
// Note that CFMetaData.validateCompatibility already validate the change we're about to do. However, the error message it
// sends is a bit cryptic for a CQL3 user, so validating here for a sake of returning a better error message
// Do note that we need isCompatibleWith here, not just isValueCompatibleWith.
if (!type->is_compatible_with(*def->type)) {
throw exceptions::configuration_exception(sprint("Cannot change %s from type %s to type %s: types are not order-compatible.",
column_name,
def->type->as_cql3_type(),
validator));
}
break;
case column_kind::compact_column:
case column_kind::regular_column:
case column_kind::static_column:
// Thrift allows to change a column validator so CFMetaData.validateCompatibility will let it slide
// if we change to an incompatible type (contrarily to the comparator case). But we don't want to
// allow it for CQL3 (see #5882) so validating it explicitly here. We only care about value compatibility
// though since we won't compare values (except when there is an index, but that is validated by
// ColumnDefinition already).
if (!type->is_value_compatible_with(*def->type)) {
throw exceptions::configuration_exception(sprint("Cannot change %s from type %s to type %s: types are incompatible.",
column_name,
def->type->as_cql3_type(),
validator));
}
break;
}
// In any case, we update the column definition
cfm.with_altered_column_type(column_name->name(), type);
break;
}
case alter_table_statement::type::drop:
// ALTER TABLE ... DROP <column>
assert(column_name);
if (!schema->is_cql3_table()) {
throw exceptions::invalid_request_exception("Cannot drop columns from a non-CQL3 table");
}
if (!def) {
throw exceptions::invalid_request_exception(sprint("Column %s was not found in table %s", column_name, column_family()));
}
if (def->is_primary_key()) {
throw exceptions::invalid_request_exception(sprint("Cannot drop PRIMARY KEY part %s", column_name));
} else {
for (auto&& column_def : boost::range::join(schema->static_columns(), schema->regular_columns())) { // find
if (column_def.name() == column_name->name()) {
cfm.without_column(column_name->name());
break;
}
}
}
break;
case alter_table_statement::type::opts:
// ALTER TABLE ... WITH <properties>
if (!_properties) {
throw exceptions::invalid_request_exception("ALTER COLUMNFAMILY WITH invoked, but no parameters found");
}
_properties->validate();
if (schema->is_counter() && _properties->get_default_time_to_live() > 0) {
throw exceptions::invalid_request_exception("Cannot set default_time_to_live on a table with counters");
}
_properties->apply_to_builder(cfm);
break;
case alter_table_statement::type::rename:
// ALTER TABLE ... RENAME <from> TO <to> [AND ...]
// Only PRIMARY KEY parts that are not indexed may be renamed.
for (auto&& entry : _renames) {
auto from = entry.first->prepare_column_identifier(schema);
auto to = entry.second->prepare_column_identifier(schema);
auto def = schema->get_column_definition(from->name());
if (!def) {
throw exceptions::invalid_request_exception(sprint("Cannot rename unknown column %s in table %s", from, column_family()));
}
if (schema->get_column_definition(to->name())) {
throw exceptions::invalid_request_exception(sprint("Cannot rename column %s to %s in table %s; another column of that name already exist", from, to, column_family()));
}
if (def->is_part_of_cell_name()) {
throw exceptions::invalid_request_exception(sprint("Cannot rename non PRIMARY KEY part %s", from));
}
if (def->is_indexed()) {
throw exceptions::invalid_request_exception(sprint("Cannot rename column %s because it is secondary indexed", from));
}
cfm.with_column_rename(from->name(), to->name());
}
break;
}
return service::get_local_migration_manager().announce_column_family_update(cfm.build(), false, is_local_only).then([] {
return true;
});
}
// Schema-change event pushed to native-protocol clients once the ALTER
// succeeds.
shared_ptr<transport::event::schema_change> alter_table_statement::change_event()
{
return make_shared<transport::event::schema_change>(transport::event::schema_change::change_type::UPDATED,
transport::event::schema_change::target_type::TABLE, keyspace(), column_family());
}
}
}

View File

@@ -1,87 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2015 ScyllaDB
*
* Modified by ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "cql3/statements/schema_altering_statement.hh"
#include "cql3/statements/cf_prop_defs.hh"
#include "cql3/cql3_type.hh"
namespace cql3 {
namespace statements {
// Parsed representation of an ALTER TABLE statement (ADD / ALTER / DROP /
// WITH <options> / RENAME). Validation and the schema change itself happen
// in announce_migration().
class alter_table_statement : public schema_altering_statement {
public:
// Which ALTER TABLE variant this statement represents.
enum class type {
add,
alter,
drop,
opts,
rename,
};
// (from, to) column pairs for the RENAME variant.
using renames_type = std::vector<std::pair<shared_ptr<column_identifier::raw>,
shared_ptr<column_identifier::raw>>>;
private:
const type _type;
// Column operated on by ADD/ALTER/DROP; may be null for other variants.
const shared_ptr<column_identifier::raw> _raw_column_name;
// New or added column type for ADD/ALTER; null otherwise.
const shared_ptr<cql3_type::raw> _validator;
// Table properties for the WITH variant; null otherwise.
const shared_ptr<cf_prop_defs> _properties;
const renames_type _renames;
// True when ADDing a static column.
const bool _is_static;
public:
alter_table_statement(shared_ptr<cf_name> name,
type t,
shared_ptr<column_identifier::raw> column_name,
shared_ptr<cql3_type::raw> validator,
shared_ptr<cf_prop_defs> properties,
renames_type renames,
bool is_static);
virtual void check_access(const service::client_state& state) override;
virtual void validate(distributed<service::storage_proxy>& proxy, const service::client_state& state) override;
virtual future<bool> announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only) override;
virtual shared_ptr<transport::event::schema_change> change_event() override;
};
}
}

View File

@@ -38,7 +38,6 @@
*/
#include "batch_statement.hh"
#include "db/config.hh"
namespace cql3 {
@@ -46,60 +45,6 @@ namespace statements {
logging::logger batch_statement::_logger("BatchStatement");
// A BATCH statement as a whole depends on no particular keyspace or column
// family; the contained statements carry their own dependencies.
bool batch_statement::depends_on_keyspace(const sstring& ks_name) const
{
return false;
}
bool batch_statement::depends_on_column_family(const sstring& cf_name) const
{
return false;
}
// Computes the total cell-value payload of a batch's mutations and logs a
// warning when it exceeds the configured batch_size_warn_threshold_in_kb.
void batch_statement::verify_batch_size(const std::vector<mutation>& mutations) {
size_t warn_threshold = service::get_local_storage_proxy().get_db().local().get_config().batch_size_warn_threshold_in_kb();
// Visitor accumulating the serialized size (bytes) of every cell value in
// a mutation partition; tombstones and row markers contribute nothing.
class my_partition_visitor : public mutation_partition_visitor {
public:
void accept_partition_tombstone(tombstone) override {}
void accept_static_cell(column_id, atomic_cell_view v) override {
size += v.value().size();
}
void accept_static_cell(column_id, collection_mutation_view v) override {
size += v.data.size();
}
void accept_row_tombstone(clustering_key_prefix_view, tombstone) override {}
void accept_row(clustering_key_view, tombstone, const row_marker&) override {}
void accept_row_cell(column_id, atomic_cell_view v) override {
size += v.value().size();
}
void accept_row_cell(column_id id, collection_mutation_view v) override {
size += v.data.size();
}
size_t size = 0;
};
my_partition_visitor v;
for (auto&m : mutations) {
m.partition().accept(*m.schema(), v);
}
auto size = v.size / 1024;
// NOTE(review): 'v.size' is in bytes while 'warn_threshold' comes from a
// *_in_kb config option and the logged 'size' is in KB — the comparison
// below looks like it should use the KB value ('size'), not 'v.size'.
// TODO confirm intended units.
if (v.size > warn_threshold) {
std::unordered_set<sstring> ks_cf_pairs;
for (auto&& m : mutations) {
ks_cf_pairs.insert(m.schema()->ks_name() + "." + m.schema()->cf_name());
}
_logger.warn(
"Batch of prepared statements for {} is of size {}, exceeding specified threshold of {} by {}.{}",
join(", ", ks_cf_pairs), size, warn_threshold,
size - warn_threshold, "");
}
}
}
}

View File

@@ -96,10 +96,6 @@ public:
|| boost::algorithm::any_of(_statements, [&] (auto&& s) { return s->uses_function(ks_name, function_name); });
}
virtual bool depends_on_keyspace(const sstring& ks_name) const override;
virtual bool depends_on_column_family(const sstring& cf_name) const override;
virtual uint32_t get_bound_terms() override {
return _bound_terms;
}
@@ -196,8 +192,27 @@ public:
* Checks batch size to ensure threshold is met. If not, a warning is logged.
* @param cfs ColumnFamilies that will store the batch's mutations.
*/
static void verify_batch_size(const std::vector<mutation>& mutations);
static void verify_batch_size(const std::vector<mutation>& mutations) {
size_t warn_threshold = 1000; // FIXME: database_descriptor::get_batch_size_warn_threshold();
size_t fail_threshold = 2000; // FIXME: database_descriptor::get_batch_size_fail_threshold();
size_t size = mutations.size();
if (size > warn_threshold) {
std::unordered_set<sstring> ks_cf_pairs;
for (auto&& m : mutations) {
ks_cf_pairs.insert(m.schema()->ks_name() + "." + m.schema()->cf_name());
}
const char* format = "Batch of prepared statements for {} is of size {}, exceeding specified threshold of {} by {}.{}";
if (size > fail_threshold) {
// FIXME: Tracing.trace(format, new Object[] {ksCfPairs, size, failThreshold, size - failThreshold, " (see batch_size_fail_threshold_in_kb)"});
_logger.error(format, join(", ", ks_cf_pairs), size, fail_threshold, size - fail_threshold, " (see batch_size_fail_threshold_in_kb)");
throw exceptions::invalid_request_exception("Batch too large");
} else {
_logger.warn(format, join(", ", ks_cf_pairs), size, warn_threshold, size - warn_threshold, "");
}
}
}
virtual future<shared_ptr<transport::messages::result_message>> execute(
distributed<service::storage_proxy>& storage, service::query_state& state, const query_options& options) override {
return execute(storage, state, options, false, options.get_timestamp(state));
@@ -303,7 +318,7 @@ public:
virtual future<shared_ptr<transport::messages::result_message>> execute_internal(
distributed<service::storage_proxy>& proxy,
service::query_state& query_state, const query_options& options) override {
throw std::runtime_error(sprint("%s not implemented", __PRETTY_FUNCTION__));
throw "not implemented";
#if 0
assert !hasConditions;
for (IMutation mutation : getMutations(BatchQueryOptions.withoutPerStatementVariables(options), true, queryState.getTimestamp()))

View File

@@ -139,11 +139,6 @@ std::map<sstring, sstring> cf_prop_defs::get_compression_options() const {
return std::map<sstring, sstring>{};
}
int32_t cf_prop_defs::get_default_time_to_live() const
{
return get_int(KW_DEFAULT_TIME_TO_LIVE, 0);
}
void cf_prop_defs::apply_to_builder(schema_builder& builder) {
if (has_property(KW_COMMENT)) {
builder.set_comment(get_string(KW_COMMENT, ""));

View File

@@ -100,8 +100,6 @@ public:
return options;
}
#endif
int32_t get_default_time_to_live() const;
void apply_to_builder(schema_builder& builder);
void validate_minimum_int(const sstring& field, int32_t minimum_value, int32_t default_value) const;
};

View File

@@ -1,83 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2014-2015 ScyllaDB
*
* Modified by ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cql3/statements/cf_statement.hh"
namespace cql3 {
namespace statements {
// Statement bound to a column family (table); _cf_name holds the optionally
// keyspace-qualified table name.
cf_statement::cf_statement(::shared_ptr<cf_name> cf_name)
: _cf_name(std::move(cf_name))
{
}
// Fills in the keyspace from the client session when the statement did not
// name one itself.
void cf_statement::prepare_keyspace(const service::client_state& state)
{
if (!_cf_name->has_keyspace()) {
// XXX: We explicitly only want to call state.getKeyspace() in this case, as we don't want to throw
// if not logged in any keyspace but a keyspace is explicitly set on the statement. So don't move
// the call outside the 'if' or replace the method by 'prepareKeyspace(state.getKeyspace())'
_cf_name->set_keyspace(state.get_keyspace(), true);
}
}
// Internal-call variant taking the keyspace directly (no client state).
void cf_statement::prepare_keyspace(sstring keyspace)
{
if (!_cf_name->has_keyspace()) {
_cf_name->set_keyspace(keyspace, true);
}
}
const sstring& cf_statement::keyspace() const
{
assert(_cf_name->has_keyspace()); // "The statement hasn't been prepared correctly"
return _cf_name->get_keyspace();
}
const sstring& cf_statement::column_family() const
{
return _cf_name->get_column_family();
}
}
}

View File

@@ -57,16 +57,35 @@ class cf_statement : public parsed_statement {
protected:
::shared_ptr<cf_name> _cf_name;
cf_statement(::shared_ptr<cf_name> cf_name);
cf_statement(::shared_ptr<cf_name> cf_name)
: _cf_name(std::move(cf_name))
{ }
public:
virtual void prepare_keyspace(const service::client_state& state);
virtual void prepare_keyspace(const service::client_state& state) {
if (!_cf_name->has_keyspace()) {
// XXX: We explicitely only want to call state.getKeyspace() in this case, as we don't want to throw
// if not logged in any keyspace but a keyspace is explicitely set on the statement. So don't move
// the call outside the 'if' or replace the method by 'prepareKeyspace(state.getKeyspace())'
_cf_name->set_keyspace(state.get_keyspace(), true);
}
}
// Only for internal calls, use the version with ClientState for user queries
virtual void prepare_keyspace(sstring keyspace);
virtual void prepare_keyspace(sstring keyspace) {
if (!_cf_name->has_keyspace()) {
_cf_name->set_keyspace(keyspace, true);
}
}
virtual const sstring& keyspace() const;
virtual const sstring& keyspace() const {
assert(_cf_name->has_keyspace()); // "The statement hasn't be prepared correctly";
return _cf_name->get_keyspace();
}
virtual const sstring& column_family() const;
virtual const sstring& column_family() const {
return _cf_name->get_column_family();
}
};
}

View File

@@ -81,7 +81,7 @@ cql3::statements::create_index_statement::validate(distributed<service::storage_
auto cd = schema->get_column_definition(target->column->name());
if (cd == nullptr) {
throw exceptions::invalid_request_exception(sprint("No column definition found for column %s", *target->column));
throw exceptions::invalid_request_exception(sprint("No column definition found for column %s", target->column->name()));
}
bool is_map = dynamic_cast<const collection_type_impl *>(cd->type.get()) != nullptr
@@ -93,7 +93,7 @@ cql3::statements::create_index_statement::validate(distributed<service::storage_
throw exceptions::invalid_request_exception(
sprint("Cannot create index on %s of frozen<map> column %s",
index_target::index_option(target->type),
*target->column));
target->column->name()));
}
} else {
// validateNotFullIndex
@@ -107,7 +107,7 @@ cql3::statements::create_index_statement::validate(distributed<service::storage_
sprint(
"Cannot create index on %s of column %s; only non-frozen collections support %s indexes",
index_target::index_option(target->type),
*target->column,
target->column->name(),
index_target::index_option(target->type)));
}
// validateTargetColumnIsMapIfIndexInvolvesKeys
@@ -118,7 +118,7 @@ cql3::statements::create_index_statement::validate(distributed<service::storage_
sprint(
"Cannot create index on %s of column %s with non-map type",
index_target::index_option(target->type),
*target->column));
target->column->name()));
}
}
@@ -132,9 +132,9 @@ cql3::statements::create_index_statement::validate(distributed<service::storage_
"Cannot create index on %s(%s): an index on %s(%s) already exists and indexing "
"a map on more than one dimension at the same time is not currently supported",
index_target::index_option(target->type),
*target->column,
target->column->name(),
index_target::index_option(prev_type),
*target->column));
target->column->name()));
}
if (_if_not_exists) {
return;
@@ -164,13 +164,12 @@ cql3::statements::create_index_statement::validate(distributed<service::storage_
throw exceptions::invalid_request_exception(
sprint(
"Cannot create secondary index on partition key column %s",
*target->column));
target->column->name()));
}
}
future<bool>
cql3::statements::create_index_statement::announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only) {
throw std::runtime_error("Indexes are not supported yet");
auto schema = proxy.local().get_db().local().find_schema(keyspace(), column_family());
auto target = _raw_target->prepare(schema);

View File

@@ -1,130 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2015 ScyllaDB
*
* Modified by ScyllaDB
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cql3/statements/create_keyspace_statement.hh"
#include "service/migration_manager.hh"
#include <regex>
namespace cql3 {
namespace statements {
// Record the keyspace name, its WITH-clause properties, and whether
// IF NOT EXISTS was specified.
create_keyspace_statement::create_keyspace_statement(const sstring& name, shared_ptr<ks_prop_defs> attrs, bool if_not_exists)
: _name{name}
, _attrs{attrs}
, _if_not_exists{if_not_exists}
{
}
const sstring& create_keyspace_statement::keyspace() const
{
return _name;
}
// Permission checks are not implemented yet; only emits an
// "unimplemented" warning (original Cassandra check kept under #if 0).
void create_keyspace_statement::check_access(const service::client_state& state)
{
warn(unimplemented::cause::PERMISSIONS);
#if 0
state.hasAllKeyspacesAccess(Permission.CREATE);
#endif
}
// Validate the keyspace name and WITH options before any schema change
// is announced. Name checks are done on a lower-cased copy.
void create_keyspace_statement::validate(distributed<service::storage_proxy>&, const service::client_state& state)
{
std::string name;
name.resize(_name.length());
// Lower-case the name so the system-keyspace comparison is case-insensitive.
std::transform(_name.begin(), _name.end(), name.begin(), ::tolower);
if (name == db::system_keyspace::NAME) {
throw exceptions::invalid_request_exception("system keyspace is not user-modifiable");
}
// keyspace name must be purely word characters (\w+)
std::regex name_regex("\\w+");
if (!std::regex_match(name, name_regex)) {
throw exceptions::invalid_request_exception(sprint("\"%s\" is not a valid keyspace name", _name.c_str()));
}
if (name.length() > schema::NAME_LENGTH) {
throw exceptions::invalid_request_exception(sprint("Keyspace names shouldn't be more than %d characters long (got \"%s\")", schema::NAME_LENGTH, _name.c_str()));
}
_attrs->validate();
if (!bool(_attrs->get_replication_strategy_class())) {
throw exceptions::configuration_exception("Missing mandatory replication strategy class");
}
#if 0
// The strategy is validated through KSMetaData.validate() in announceNewKeyspace below.
// However, for backward compatibility with thrift, this doesn't validate unexpected options yet,
// so doing proper validation here.
AbstractReplicationStrategy.validateReplicationStrategy(name,
AbstractReplicationStrategy.getClass(attrs.getReplicationStrategyClass()),
StorageService.instance.getTokenMetadata(),
DatabaseDescriptor.getEndpointSnitch(),
attrs.getReplicationOptions());
#endif
}
// Announce the new keyspace via the migration manager. Resolves to false
// (instead of failing) when the keyspace already exists and IF NOT EXISTS
// was given; otherwise the already_exists_exception is rethrown.
future<bool> create_keyspace_statement::announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only)
{
return make_ready_future<>().then([this, is_local_only] {
return service::get_local_migration_manager().announce_new_keyspace(_attrs->as_ks_metadata(_name), is_local_only);
}).then_wrapped([this] (auto&& f) {
try {
f.get();
return true;
} catch (const exceptions::already_exists_exception& e) {
if (_if_not_exists) {
return false;
}
throw e;
}
});
}
// Schema-change event sent to CQL clients: keyspace CREATED.
shared_ptr<transport::event::schema_change> create_keyspace_statement::change_event()
{
return make_shared<transport::event::schema_change>(transport::event::schema_change::change_type::CREATED, keyspace());
}
}
}

View File

@@ -41,8 +41,11 @@
#pragma once
#include <regex>
#include "cql3/statements/schema_altering_statement.hh"
#include "cql3/statements/ks_prop_defs.hh"
#include "service/migration_manager.hh"
#include "transport/event.hh"
#include "core/shared_ptr.hh"
@@ -66,11 +69,22 @@ public:
* @param name the name of the keyspace to create
* @param attrs map of the raw keyword arguments that followed the <code>WITH</code> keyword.
*/
create_keyspace_statement(const sstring& name, shared_ptr<ks_prop_defs> attrs, bool if_not_exists);
create_keyspace_statement(const sstring& name, shared_ptr<ks_prop_defs> attrs, bool if_not_exists)
: _name{name}
, _attrs{attrs}
, _if_not_exists{if_not_exists}
{ }
virtual const sstring& keyspace() const override;
virtual const sstring& keyspace() const override {
return _name;
}
virtual void check_access(const service::client_state& state) override;
virtual void check_access(const service::client_state& state) override {
warn(unimplemented::cause::PERMISSIONS);
#if 0
state.hasAllKeyspacesAccess(Permission.CREATE);
#endif
}
/**
* The <code>CqlParser</code> only goes as far as extracting the keyword arguments
@@ -79,11 +93,58 @@ public:
*
* @throws InvalidRequestException if arguments are missing or unacceptable
*/
virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;
virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override {
std::string name;
name.resize(_name.length());
std::transform(_name.begin(), _name.end(), name.begin(), ::tolower);
if (name == db::system_keyspace::NAME) {
throw exceptions::invalid_request_exception("system keyspace is not user-modifiable");
}
// keyspace name
std::regex name_regex("\\w+");
if (!std::regex_match(name, name_regex)) {
throw exceptions::invalid_request_exception(sprint("\"%s\" is not a valid keyspace name", _name.c_str()));
}
if (name.length() > schema::NAME_LENGTH) {
throw exceptions::invalid_request_exception(sprint("Keyspace names shouldn't be more than %d characters long (got \"%s\")", schema::NAME_LENGTH, _name.c_str()));
}
virtual future<bool> announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only) override;
_attrs->validate();
virtual shared_ptr<transport::event::schema_change> change_event() override;
if (!bool(_attrs->get_replication_strategy_class())) {
throw exceptions::configuration_exception("Missing mandatory replication strategy class");
}
#if 0
// The strategy is validated through KSMetaData.validate() in announceNewKeyspace below.
// However, for backward compatibility with thrift, this doesn't validate unexpected options yet,
// so doing proper validation here.
AbstractReplicationStrategy.validateReplicationStrategy(name,
AbstractReplicationStrategy.getClass(attrs.getReplicationStrategyClass()),
StorageService.instance.getTokenMetadata(),
DatabaseDescriptor.getEndpointSnitch(),
attrs.getReplicationOptions());
#endif
}
virtual future<bool> announce_migration(distributed<service::storage_proxy>& proxy, bool is_local_only) override {
return make_ready_future<>().then([this, is_local_only] {
return service::get_local_migration_manager().announce_new_keyspace(_attrs->as_ks_metadata(_name), is_local_only);
}).then_wrapped([this] (auto&& f) {
try {
f.get();
return true;
} catch (const exceptions::already_exists_exception& e) {
if (_if_not_exists) {
return false;
}
throw e;
}
});
}
virtual shared_ptr<transport::event::schema_change> change_event() override {
return make_shared<transport::event::schema_change>(transport::event::schema_change::change_type::CREATED, keyspace());
}
};
}

View File

@@ -45,14 +45,6 @@ namespace cql3 {
namespace statements {
// Forward everything to the modification_statement base; DELETE adds no
// extra constructor state of its own.
delete_statement::delete_statement(statement_type type, uint32_t bound_terms, schema_ptr s, std::unique_ptr<attributes> attrs)
: modification_statement{type, bound_terms, std::move(s), std::move(attrs)}
{ }
// A DELETE need not name the full clustering key — presumably a clustering
// prefix deletes a whole range (see add_update_for_key); confirm against base.
bool delete_statement::require_full_clustering_key() const {
return false;
}
void delete_statement::add_update_for_key(mutation& m, const exploded_clustering_prefix& prefix, const update_parameters& params) {
if (_column_operations.empty()) {
m.partition().apply_delete(*s, prefix, params.make_tombstone());
@@ -104,17 +96,5 @@ delete_statement::parsed::prepare_internal(database& db, schema_ptr schema, ::sh
return stmt;
}
// Raw (pre-preparation) form of a DELETE statement: the deletions list and
// WHERE clause are kept verbatim; conditions/IF EXISTS go to the base parsed.
delete_statement::parsed::parsed(::shared_ptr<cf_name> name,
::shared_ptr<attributes::raw> attrs,
std::vector<::shared_ptr<operation::raw_deletion>> deletions,
std::vector<::shared_ptr<relation>> where_clause,
conditions_vector conditions,
bool if_exists)
: modification_statement::parsed(std::move(name), std::move(attrs), std::move(conditions), false, if_exists)
, _deletions(std::move(deletions))
, _where_clause(std::move(where_clause))
{ }
}
}

View File

@@ -55,9 +55,13 @@ namespace statements {
*/
class delete_statement : public modification_statement {
public:
delete_statement(statement_type type, uint32_t bound_terms, schema_ptr s, std::unique_ptr<attributes> attrs);
delete_statement(statement_type type, uint32_t bound_terms, schema_ptr s, std::unique_ptr<attributes> attrs)
: modification_statement{type, bound_terms, std::move(s), std::move(attrs)}
{ }
virtual bool require_full_clustering_key() const override;
virtual bool require_full_clustering_key() const override {
return false;
}
virtual void add_update_for_key(mutation& m, const exploded_clustering_prefix& prefix, const update_parameters& params) override;
@@ -90,7 +94,11 @@ public:
std::vector<::shared_ptr<operation::raw_deletion>> deletions,
std::vector<::shared_ptr<relation>> where_clause,
conditions_vector conditions,
bool if_exists);
bool if_exists)
: modification_statement::parsed(std::move(name), std::move(attrs), std::move(conditions), false, if_exists)
, _deletions(std::move(deletions))
, _where_clause(std::move(where_clause))
{ }
protected:
virtual ::shared_ptr<modification_statement> prepare_internal(database& db, schema_ptr schema,
::shared_ptr<variable_specifications> bound_names, std::unique_ptr<attributes> attrs);

View File

@@ -71,81 +71,6 @@ operator<<(std::ostream& out, modification_statement::statement_type t) {
return out;
}
// Base constructor shared by INSERT/UPDATE/DELETE: statement kind, number of
// bind markers, schema, and the USING attributes (timestamp/TTL).
modification_statement::modification_statement(statement_type type_, uint32_t bound_terms, schema_ptr schema_, std::unique_ptr<attributes> attrs_)
: type{type_}
, _bound_terms{bound_terms}
, s{schema_}
, attrs{std::move(attrs_)}
, _column_operations{}
{ }
// True when the named function is referenced anywhere in this statement:
// the USING attributes, key restrictions, column operations, or the column
// and static conditions.
bool modification_statement::uses_function(const sstring& ks_name, const sstring& function_name) const {
if (attrs->uses_function(ks_name, function_name)) {
return true;
}
for (auto&& e : _processed_keys) {
auto r = e.second;
if (r && r->uses_function(ks_name, function_name)) {
return true;
}
}
for (auto&& operation : _column_operations) {
if (operation && operation->uses_function(ks_name, function_name)) {
return true;
}
}
for (auto&& condition : _column_conditions) {
if (condition && condition->uses_function(ks_name, function_name)) {
return true;
}
}
for (auto&& condition : _static_conditions) {
if (condition && condition->uses_function(ks_name, function_name)) {
return true;
}
}
return false;
}
// Number of bind markers ('?') this statement was prepared with.
uint32_t modification_statement::get_bound_terms() {
return _bound_terms;
}
sstring modification_statement::keyspace() const {
return s->ks_name();
}
sstring modification_statement::column_family() const {
return s->cf_name();
}
bool modification_statement::is_counter() const {
return s->is_counter();
}
// Write timestamp: taken from USING TIMESTAMP if set, else 'now'.
int64_t modification_statement::get_timestamp(int64_t now, const query_options& options) const {
return attrs->get_timestamp(now, options);
}
bool modification_statement::is_timestamp_set() const {
return attrs->is_timestamp_set();
}
// TTL from the USING clause, as a gc_clock duration.
gc_clock::duration modification_statement::get_time_to_live(const query_options& options) const {
return gc_clock::duration(attrs->get_time_to_live(options));
}
// Permission checks are not implemented yet; only emits an "unimplemented"
// warning (original Cassandra checks kept under #if 0).
void modification_statement::check_access(const service::client_state& state) {
warn(unimplemented::cause::PERMISSIONS);
#if 0
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.MODIFY);
// CAS updates can be used to simulate a SELECT query, so should require Permission.SELECT as well.
if (hasConditions())
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.SELECT);
#endif
}
future<std::vector<mutation>>
modification_statement::get_mutations(distributed<service::storage_proxy>& proxy, const query_options& options, bool local, int64_t now) {
auto keys = make_lw_shared(build_partition_keys(options));
@@ -205,9 +130,9 @@ public:
const query::result_row_view& row) {
update_parameters::prefetch_data::row cells;
auto add_cell = [&cells] (column_id id, std::experimental::optional<collection_mutation_view>&& cell) {
auto add_cell = [&cells] (column_id id, std::experimental::optional<collection_mutation::view>&& cell) {
if (cell) {
cells.emplace(id, collection_mutation{to_bytes(cell->data)});
cells.emplace(id, collection_mutation::one{to_bytes(cell->data)});
}
};
@@ -270,7 +195,7 @@ modification_statement::read_required_rows(
for (auto&& pk : *keys) {
pr.emplace_back(dht::global_partitioner().decorate_key(*s, pk));
}
query::read_command cmd(s->id(), s->version(), ps, std::numeric_limits<uint32_t>::max());
query::read_command cmd(s->id(), ps, std::numeric_limits<uint32_t>::max());
// FIXME: ignoring "local"
return proxy.local().query(s, make_lw_shared(std::move(cmd)), std::move(pr), cl).then([this, ps] (auto result) {
// FIXME: copying
@@ -616,71 +541,6 @@ modification_statement::validate(distributed<service::storage_proxy>&, const ser
}
}
// Schema-dependency predicates used when schema objects are dropped/altered.
bool modification_statement::depends_on_keyspace(const sstring& ks_name) const {
return keyspace() == ks_name;
}
bool modification_statement::depends_on_column_family(const sstring& cf_name) const {
return column_family() == cf_name;
}
// Register a column assignment and track whether it touches static or
// regular columns (used later when building the mutation).
void modification_statement::add_operation(::shared_ptr<operation> op) {
if (op->column.is_static()) {
_sets_static_columns = true;
} else {
_sets_regular_columns = true;
}
_column_operations.push_back(std::move(op));
}
// Register an IF <condition>; static and regular-column conditions are kept
// in separate lists.
void modification_statement::add_condition(::shared_ptr<column_condition> cond) {
if (cond->column.is_static()) {
_sets_static_columns = true;
_static_conditions.emplace_back(std::move(cond));
} else {
_sets_regular_columns = true;
_column_conditions.emplace_back(std::move(cond));
}
}
// IF NOT EXISTS / IF EXISTS flags set by the parser.
void modification_statement::set_if_not_exist_condition() {
_if_not_exists = true;
}
bool modification_statement::has_if_not_exist_condition() const {
return _if_not_exists;
}
void modification_statement::set_if_exist_condition() {
_if_exists = true;
}
bool modification_statement::has_if_exist_condition() const {
return _if_exists;
}
// True when any column operation needs to read the current row first
// (e.g. appends that depend on the existing value).
bool modification_statement::requires_read() {
return std::any_of(_column_operations.begin(), _column_operations.end(), [] (auto&& op) {
return op->requires_read();
});
}
// True for any lightweight-transaction form: IF [NOT] EXISTS or column/static
// IF conditions.
bool modification_statement::has_conditions() {
return _if_not_exists || _if_exists || !_column_conditions.empty() || !_static_conditions.empty();
}
// Hook for subclasses to reject WHERE clauses incompatible with conditions.
void modification_statement::validate_where_clause_for_conditions() {
// no-op by default
}
// Raw (pre-preparation) form shared by all modification statements.
modification_statement::parsed::parsed(::shared_ptr<cf_name> name, ::shared_ptr<attributes::raw> attrs, conditions_vector conditions, bool if_not_exists, bool if_exists)
: cf_statement{std::move(name)}
, _attrs{std::move(attrs)}
, _conditions{std::move(conditions)}
, _if_not_exists{if_not_exists}
, _if_exists{if_exists}
{ }
}
}

View File

@@ -107,37 +107,95 @@ private:
};
public:
modification_statement(statement_type type_, uint32_t bound_terms, schema_ptr schema_, std::unique_ptr<attributes> attrs_);
modification_statement(statement_type type_, uint32_t bound_terms, schema_ptr schema_, std::unique_ptr<attributes> attrs_)
: type{type_}
, _bound_terms{bound_terms}
, s{schema_}
, attrs{std::move(attrs_)}
, _column_operations{}
{ }
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override;
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
if (attrs->uses_function(ks_name, function_name)) {
return true;
}
for (auto&& e : _processed_keys) {
auto r = e.second;
if (r && r->uses_function(ks_name, function_name)) {
return true;
}
}
for (auto&& operation : _column_operations) {
if (operation && operation->uses_function(ks_name, function_name)) {
return true;
}
}
for (auto&& condition : _column_conditions) {
if (condition && condition->uses_function(ks_name, function_name)) {
return true;
}
}
for (auto&& condition : _static_conditions) {
if (condition && condition->uses_function(ks_name, function_name)) {
return true;
}
}
return false;
}
virtual bool require_full_clustering_key() const = 0;
virtual void add_update_for_key(mutation& m, const exploded_clustering_prefix& prefix, const update_parameters& params) = 0;
virtual uint32_t get_bound_terms() override;
virtual uint32_t get_bound_terms() override {
return _bound_terms;
}
virtual sstring keyspace() const;
virtual sstring keyspace() const {
return s->ks_name();
}
virtual sstring column_family() const;
virtual sstring column_family() const {
return s->cf_name();
}
virtual bool is_counter() const;
virtual bool is_counter() const {
return s->is_counter();
}
int64_t get_timestamp(int64_t now, const query_options& options) const;
int64_t get_timestamp(int64_t now, const query_options& options) const {
return attrs->get_timestamp(now, options);
}
bool is_timestamp_set() const;
bool is_timestamp_set() const {
return attrs->is_timestamp_set();
}
gc_clock::duration get_time_to_live(const query_options& options) const;
gc_clock::duration get_time_to_live(const query_options& options) const {
return gc_clock::duration(attrs->get_time_to_live(options));
}
virtual void check_access(const service::client_state& state) override;
virtual void check_access(const service::client_state& state) override {
warn(unimplemented::cause::PERMISSIONS);
#if 0
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.MODIFY);
// CAS updates can be used to simulate a SELECT query, so should require Permission.SELECT as well.
if (hasConditions())
state.hasColumnFamilyAccess(keyspace(), columnFamily(), Permission.SELECT);
#endif
}
void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;
virtual bool depends_on_keyspace(const sstring& ks_name) const override;
virtual bool depends_on_column_family(const sstring& cf_name) const override;
void add_operation(::shared_ptr<operation> op);
void add_operation(::shared_ptr<operation> op) {
if (op->column.is_static()) {
_sets_static_columns = true;
} else {
_sets_regular_columns = true;
}
_column_operations.push_back(std::move(op));
}
#if 0
public Iterable<ColumnDefinition> getColumnsWithConditions()
@@ -150,15 +208,31 @@ public:
}
#endif
public:
void add_condition(::shared_ptr<column_condition> cond);
void add_condition(::shared_ptr<column_condition> cond) {
if (cond->column.is_static()) {
_sets_static_columns = true;
_static_conditions.emplace_back(std::move(cond));
} else {
_sets_regular_columns = true;
_column_conditions.emplace_back(std::move(cond));
}
}
void set_if_not_exist_condition();
void set_if_not_exist_condition() {
_if_not_exists = true;
}
bool has_if_not_exist_condition() const;
bool has_if_not_exist_condition() const {
return _if_not_exists;
}
void set_if_exist_condition();
void set_if_exist_condition() {
_if_exists = true;
}
bool has_if_exist_condition() const;
bool has_if_exist_condition() const {
return _if_exists;
}
private:
void add_key_values(const column_definition& def, ::shared_ptr<restrictions::restriction> values);
@@ -176,7 +250,11 @@ protected:
const column_definition* get_first_empty_key();
public:
bool requires_read();
bool requires_read() {
return std::any_of(_column_operations.begin(), _column_operations.end(), [] (auto&& op) {
return op->requires_read();
});
}
protected:
future<update_parameters::prefetched_rows_type> read_required_rows(
@@ -187,7 +265,9 @@ protected:
db::consistency_level cl);
public:
bool has_conditions();
bool has_conditions() {
return _if_not_exists || _if_exists || !_column_conditions.empty() || !_static_conditions.empty();
}
virtual future<::shared_ptr<transport::messages::result_message>>
execute(distributed<service::storage_proxy>& proxy, service::query_state& qs, const query_options& options) override;
@@ -344,7 +424,9 @@ protected:
* processed to check that they are compatible.
* @throws InvalidRequestException
*/
virtual void validate_where_clause_for_conditions();
virtual void validate_where_clause_for_conditions() {
// no-op by default
}
public:
class parsed : public cf_statement {
@@ -357,7 +439,13 @@ public:
const bool _if_not_exists;
const bool _if_exists;
protected:
parsed(::shared_ptr<cf_name> name, ::shared_ptr<attributes::raw> attrs, conditions_vector conditions, bool if_not_exists, bool if_exists);
parsed(::shared_ptr<cf_name> name, ::shared_ptr<attributes::raw> attrs, conditions_vector conditions, bool if_not_exists, bool if_exists)
: cf_statement{std::move(name)}
, _attrs{std::move(attrs)}
, _conditions{std::move(conditions)}
, _if_not_exists{if_not_exists}
, _if_exists{if_exists}
{ }
public:
virtual ::shared_ptr<parsed_statement::prepared> prepare(database& db) override;

View File

@@ -1,83 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2014 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cql3/statements/parsed_statement.hh"
namespace cql3 {
namespace statements {
// Out-of-line virtual destructor anchors the vtable.
parsed_statement::~parsed_statement()
{ }
// Specifications for the statement's bind markers, if any were set.
shared_ptr<variable_specifications> parsed_statement::get_bound_variables() {
return _variables;
}
// Used by the parser and preparable statement
void parsed_statement::set_bound_variables(const std::vector<::shared_ptr<column_identifier>>& bound_names) {
_variables = ::make_shared<variable_specifications>(bound_names);
}
// Default: a parsed statement references no functions; subclasses override.
bool parsed_statement::uses_function(const sstring& ks_name, const sstring& function_name) const {
return false;
}
// prepared: the executable statement plus the bind-marker specifications.
parsed_statement::prepared::prepared(::shared_ptr<cql_statement> statement_, std::vector<::shared_ptr<column_specification>> bound_names_)
: statement(std::move(statement_))
, bound_names(std::move(bound_names_))
{ }
// Convenience overloads delegating to the vector form above.
parsed_statement::prepared::prepared(::shared_ptr<cql_statement> statement_, const variable_specifications& names)
: prepared(statement_, names.get_specifications())
{ }
parsed_statement::prepared::prepared(::shared_ptr<cql_statement> statement_, variable_specifications&& names)
: prepared(statement_, std::move(names).get_specifications())
{ }
parsed_statement::prepared::prepared(::shared_ptr<cql_statement>&& statement_)
: prepared(statement_, std::vector<::shared_ptr<column_specification>>())
{ }
}
}

View File

@@ -60,29 +60,47 @@ private:
::shared_ptr<variable_specifications> _variables;
public:
virtual ~parsed_statement();
virtual ~parsed_statement()
{ }
shared_ptr<variable_specifications> get_bound_variables();
shared_ptr<variable_specifications> get_bound_variables() {
return _variables;
}
void set_bound_variables(const std::vector<::shared_ptr<column_identifier>>& bound_names);
// Used by the parser and preparable statement
void set_bound_variables(const std::vector<::shared_ptr<column_identifier>>& bound_names)
{
_variables = ::make_shared<variable_specifications>(bound_names);
}
class prepared {
public:
const ::shared_ptr<cql_statement> statement;
const std::vector<::shared_ptr<column_specification>> bound_names;
prepared(::shared_ptr<cql_statement> statement_, std::vector<::shared_ptr<column_specification>> bound_names_);
prepared(::shared_ptr<cql_statement> statement_, std::vector<::shared_ptr<column_specification>> bound_names_)
: statement(std::move(statement_))
, bound_names(std::move(bound_names_))
{ }
prepared(::shared_ptr<cql_statement> statement_, const variable_specifications& names);
prepared(::shared_ptr<cql_statement> statement_, const variable_specifications& names)
: prepared(statement_, names.get_specifications())
{ }
prepared(::shared_ptr<cql_statement> statement_, variable_specifications&& names);
prepared(::shared_ptr<cql_statement> statement_, variable_specifications&& names)
: prepared(statement_, std::move(names).get_specifications())
{ }
prepared(::shared_ptr<cql_statement>&& statement_);
prepared(::shared_ptr<cql_statement>&& statement_)
: prepared(statement_, std::vector<::shared_ptr<column_specification>>())
{ }
};
virtual ::shared_ptr<prepared> prepare(database& db) = 0;
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const;
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const {
return false;
}
};
}

View File

@@ -1,186 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Copyright 2015 Cloudius Systems
*
* Modified by Cloudius Systems
*/
/*
* This file is part of Scylla.
*
* Scylla is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Scylla is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Scylla. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cql3/statements/property_definitions.hh"
namespace cql3 {
namespace statements {
property_definitions::property_definitions()
: _properties{}
{ }
void property_definitions::add_property(const sstring& name, sstring value) {
auto it = _properties.find(name);
if (it != _properties.end()) {
throw exceptions::syntax_exception(sprint("Multiple definition for property '%s'", name));
}
_properties.emplace(name, value);
}
void property_definitions::add_property(const sstring& name, const std::map<sstring, sstring>& value) {
auto it = _properties.find(name);
if (it != _properties.end()) {
throw exceptions::syntax_exception(sprint("Multiple definition for property '%s'", name));
}
_properties.emplace(name, value);
}
// Checks every stored property name against the accepted keyword set.
// Obsolete property names are tolerated silently (the warning is compiled
// out); any other unknown name is a syntax error.
void property_definitions::validate(const std::set<sstring>& keywords, const std::set<sstring>& obsolete) {
    for (auto&& entry : _properties) {
        auto&& key = entry.first;
        if (keywords.count(key)) {
            continue;
        }
        if (!obsolete.count(key)) {
            throw exceptions::syntax_exception(sprint("Unknown property '%s'", key));
        }
#if 0
        logger.warn("Ignoring obsolete property {}", key);
#endif
    }
}
// Looks up a string-typed property.
//
// Returns nullopt when the property was never defined. Throws
// exceptions::syntax_exception when the property exists but holds a
// non-string value (e.g. a map).
std::experimental::optional<sstring> property_definitions::get_simple(const sstring& name) const {
    auto entry = _properties.find(name);
    if (entry != _properties.end()) {
        try {
            return boost::any_cast<sstring>(entry->second);
        } catch (const boost::bad_any_cast&) {
            throw exceptions::syntax_exception(sprint("Invalid value for property '%s'. It should be a string", name));
        }
    }
    return std::experimental::nullopt;
}
// Looks up a map-typed property.
//
// Returns nullopt when the property was never defined. Throws
// exceptions::syntax_exception when the property exists but holds a
// non-map value (e.g. a plain string).
std::experimental::optional<std::map<sstring, sstring>> property_definitions::get_map(const sstring& name) const {
    auto entry = _properties.find(name);
    if (entry != _properties.end()) {
        try {
            return boost::any_cast<std::map<sstring, sstring>>(entry->second);
        } catch (const boost::bad_any_cast&) {
            throw exceptions::syntax_exception(sprint("Invalid value for property '%s'. It should be a map.", name));
        }
    }
    return std::experimental::nullopt;
}
// Returns true iff a property with the given name has been added,
// regardless of its stored type.
bool property_definitions::has_property(const sstring& name) const {
    return _properties.count(name) != 0;
}
// Returns the string value of the property, or default_value when the
// property is absent. Propagates syntax_exception from get_simple() when
// the stored value is not a string.
sstring property_definitions::get_string(sstring key, sstring default_value) const {
    return get_simple(key).value_or(default_value);
}
// Return a property value, typed as a Boolean.
//
// "1", "true" and "yes" (compared case-insensitively) map to true; any
// other present value — including malformed ones — maps to false. An
// absent property yields default_value.
bool property_definitions::get_boolean(sstring key, bool default_value) const {
    auto value = get_simple(key);
    if (!value) {
        return default_value;
    }
    std::string s{value.value()};
    // Cast through unsigned char before calling tolower(): passing a
    // negative (plain) char is undefined behavior per the C standard.
    std::transform(s.begin(), s.end(), s.begin(), [] (unsigned char c) {
        return static_cast<char>(std::tolower(c));
    });
    return s == "1" || s == "true" || s == "yes";
}
// Return a property value, typed as a double. An absent property yields
// default_value; a malformed value makes to_double() throw.
double property_definitions::get_double(sstring key, double default_value) const {
    return to_double(key, get_simple(key), default_value);
}
// Parses an optional property value as a double; returns default_value
// when the optional is empty.
//
// Throws exceptions::syntax_exception when the value does not parse as a
// double, is out of range, or parses only partially.
double property_definitions::to_double(sstring key, std::experimental::optional<sstring> value, double default_value) {
    if (!value) {
        return default_value;
    }
    auto val = value.value();
    try {
        size_t pos = 0;
        double d = std::stod(val, &pos);
        // stod() accepts any valid numeric prefix; require that the whole
        // string was consumed so inputs like "1.5abc" are rejected instead
        // of being silently truncated.
        if (pos != val.size()) {
            throw std::invalid_argument("trailing characters");
        }
        return d;
    } catch (const std::exception&) {
        throw exceptions::syntax_exception(sprint("Invalid double value %s for '%s'", val, key));
    }
}
// Return a property value, typed as an Integer. An absent property yields
// default_value; a malformed value makes to_int() throw.
int32_t property_definitions::get_int(sstring key, int32_t default_value) const {
    return to_int(key, get_simple(key), default_value);
}
// Parses an optional property value as a 32-bit integer; returns
// default_value when the optional is empty.
//
// Throws exceptions::syntax_exception when the value does not parse as an
// integer, is out of range, or parses only partially.
int32_t property_definitions::to_int(sstring key, std::experimental::optional<sstring> value, int32_t default_value) {
    if (!value) {
        return default_value;
    }
    auto val = value.value();
    try {
        size_t pos = 0;
        int32_t i = std::stoi(val, &pos);
        // stoi() accepts any valid numeric prefix; require full consumption
        // so inputs like "12abc" are rejected instead of being truncated.
        if (pos != val.size()) {
            throw std::invalid_argument("trailing characters");
        }
        return i;
    } catch (const std::exception&) {
        throw exceptions::syntax_exception(sprint("Invalid integer value %s for '%s'", val, key));
    }
}
// Parses an optional property value as a long; returns default_value when
// the optional is empty.
//
// Throws exceptions::syntax_exception when the value does not parse as a
// long, is out of range, or parses only partially.
long property_definitions::to_long(sstring key, std::experimental::optional<sstring> value, long default_value) {
    if (!value) {
        return default_value;
    }
    auto val = value.value();
    try {
        size_t pos = 0;
        long l = std::stol(val, &pos);
        // stol() accepts any valid numeric prefix; require full consumption
        // so inputs like "10x" are rejected instead of being truncated.
        if (pos != val.size()) {
            throw std::invalid_argument("trailing characters");
        }
        return l;
    } catch (const std::exception&) {
        throw exceptions::syntax_exception(sprint("Invalid long value %s for '%s'", val, key));
    }
}
}
}

View File

@@ -66,38 +66,141 @@ protected:
#endif
std::unordered_map<sstring, boost::any> _properties;
property_definitions();
property_definitions()
: _properties{}
{ }
public:
void add_property(const sstring& name, sstring value);
void add_property(const sstring& name, sstring value) {
auto it = _properties.find(name);
if (it != _properties.end()) {
throw exceptions::syntax_exception(sprint("Multiple definition for property '%s'", name));
}
_properties.emplace(name, value);
}
void add_property(const sstring& name, const std::map<sstring, sstring>& value);
void validate(const std::set<sstring>& keywords, const std::set<sstring>& obsolete);
void add_property(const sstring& name, const std::map<sstring, sstring>& value) {
auto it = _properties.find(name);
if (it != _properties.end()) {
throw exceptions::syntax_exception(sprint("Multiple definition for property '%s'", name));
}
_properties.emplace(name, value);
}
void validate(const std::set<sstring>& keywords, const std::set<sstring>& obsolete) {
for (auto&& kv : _properties) {
auto&& name = kv.first;
if (keywords.count(name)) {
continue;
}
if (obsolete.count(name)) {
#if 0
logger.warn("Ignoring obsolete property {}", name);
#endif
} else {
throw exceptions::syntax_exception(sprint("Unknown property '%s'", name));
}
}
}
protected:
std::experimental::optional<sstring> get_simple(const sstring& name) const;
std::experimental::optional<std::map<sstring, sstring>> get_map(const sstring& name) const;
std::experimental::optional<sstring> get_simple(const sstring& name) const {
auto it = _properties.find(name);
if (it == _properties.end()) {
return std::experimental::nullopt;
}
try {
return boost::any_cast<sstring>(it->second);
} catch (const boost::bad_any_cast& e) {
throw exceptions::syntax_exception(sprint("Invalid value for property '%s'. It should be a string", name));
}
}
std::experimental::optional<std::map<sstring, sstring>> get_map(const sstring& name) const {
auto it = _properties.find(name);
if (it == _properties.end()) {
return std::experimental::nullopt;
}
try {
return boost::any_cast<std::map<sstring, sstring>>(it->second);
} catch (const boost::bad_any_cast& e) {
throw exceptions::syntax_exception(sprint("Invalid value for property '%s'. It should be a map.", name));
}
}
public:
bool has_property(const sstring& name) const;
bool has_property(const sstring& name) const {
return _properties.find(name) != _properties.end();
}
sstring get_string(sstring key, sstring default_value) const;
sstring get_string(sstring key, sstring default_value) const {
auto value = get_simple(key);
if (value) {
return value.value();
} else {
return default_value;
}
}
// Return a property value, typed as a Boolean
bool get_boolean(sstring key, bool default_value) const;
bool get_boolean(sstring key, bool default_value) const {
auto value = get_simple(key);
if (value) {
std::string s{value.value()};
std::transform(s.begin(), s.end(), s.begin(), ::tolower);
return s == "1" || s == "true" || s == "yes";
} else {
return default_value;
}
}
// Return a property value, typed as a double
double get_double(sstring key, double default_value) const;
double get_double(sstring key, double default_value) const {
auto value = get_simple(key);
return to_double(key, value, default_value);
}
static double to_double(sstring key, std::experimental::optional<sstring> value, double default_value);
static double to_double(sstring key, std::experimental::optional<sstring> value, double default_value) {
if (value) {
auto val = value.value();
try {
return std::stod(val);
} catch (const std::exception& e) {
throw exceptions::syntax_exception(sprint("Invalid double value %s for '%s'", val, key));
}
} else {
return default_value;
}
}
// Return a property value, typed as an Integer
int32_t get_int(sstring key, int32_t default_value) const;
int32_t get_int(sstring key, int32_t default_value) const {
auto value = get_simple(key);
return to_int(key, value, default_value);
}
static int32_t to_int(sstring key, std::experimental::optional<sstring> value, int32_t default_value);
static int32_t to_int(sstring key, std::experimental::optional<sstring> value, int32_t default_value) {
if (value) {
auto val = value.value();
try {
return std::stoi(val);
} catch (const std::exception& e) {
throw exceptions::syntax_exception(sprint("Invalid integer value %s for '%s'", val, key));
}
} else {
return default_value;
}
}
static long to_long(sstring key, std::experimental::optional<sstring> value, long default_value);
static long to_long(sstring key, std::experimental::optional<sstring> value, long default_value) {
if (value) {
auto val = value.value();
try {
return std::stol(val);
} catch (const std::exception& e) {
throw exceptions::syntax_exception(sprint("Invalid long value %s for '%s'", val, key));
}
} else {
return default_value;
}
}
};
}

View File

@@ -47,50 +47,6 @@ namespace cql3 {
namespace statements {
schema_altering_statement::schema_altering_statement()
: cf_statement{::shared_ptr<cf_name>{}}
, _is_column_family_level{false}
{
}
schema_altering_statement::schema_altering_statement(::shared_ptr<cf_name> name)
: cf_statement{std::move(name)}
, _is_column_family_level{true}
{
}
bool schema_altering_statement::uses_function(const sstring& ks_name, const sstring& function_name) const
{
return cf_statement::uses_function(ks_name, function_name);
}
bool schema_altering_statement::depends_on_keyspace(const sstring& ks_name) const
{
return false;
}
bool schema_altering_statement::depends_on_column_family(const sstring& cf_name) const
{
return false;
}
uint32_t schema_altering_statement::get_bound_terms()
{
return 0;
}
void schema_altering_statement::prepare_keyspace(const service::client_state& state)
{
if (_is_column_family_level) {
cf_statement::prepare_keyspace(state);
}
}
::shared_ptr<parsed_statement::prepared> schema_altering_statement::prepare(database& db)
{
return ::make_shared<parsed_statement::prepared>(this->shared_from_this());
}
future<::shared_ptr<messages::result_message>>
schema_altering_statement::execute0(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options, bool is_local_only) {
// If an IF [NOT] EXISTS clause was used, this may not result in an actual schema change. To avoid doing

View File

@@ -41,7 +41,16 @@
#pragma once
#include "transport/messages_fwd.hh"
namespace transport {
namespace messages {
class result_message;
}
}
#include "transport/event.hh"
#include "cql3/statements/cf_statement.hh"
@@ -67,21 +76,33 @@ private:
future<::shared_ptr<messages::result_message>>
execute0(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options, bool);
protected:
schema_altering_statement();
schema_altering_statement()
: cf_statement{::shared_ptr<cf_name>{}}
, _is_column_family_level{false}
{ }
schema_altering_statement(::shared_ptr<cf_name> name);
schema_altering_statement(::shared_ptr<cf_name> name)
: cf_statement{std::move(name)}
, _is_column_family_level{true}
{ }
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override;
virtual bool uses_function(const sstring& ks_name, const sstring& function_name) const override {
return cf_statement::uses_function(ks_name, function_name);
}
virtual bool depends_on_keyspace(const sstring& ks_name) const override;
virtual uint32_t get_bound_terms() override {
return 0;
}
virtual bool depends_on_column_family(const sstring& cf_name) const override;
virtual void prepare_keyspace(const service::client_state& state) override {
if (_is_column_family_level) {
cf_statement::prepare_keyspace(state);
}
}
virtual uint32_t get_bound_terms() override;
virtual void prepare_keyspace(const service::client_state& state) override;
virtual ::shared_ptr<prepared> prepare(database& db) override;
virtual ::shared_ptr<prepared> prepare(database& db) override {
return ::make_shared<parsed_statement::prepared>(this->shared_from_this());
}
virtual shared_ptr<transport::event::schema_change> change_event() = 0;

View File

@@ -46,7 +46,6 @@
#include "core/shared_ptr.hh"
#include "query-result-reader.hh"
#include "query_result_merger.hh"
#include "service/pager/query_pagers.hh"
namespace cql3 {
@@ -54,31 +53,6 @@ namespace statements {
thread_local const shared_ptr<select_statement::parameters> select_statement::_default_parameters = ::make_shared<select_statement::parameters>();
select_statement::parameters::parameters()
: _is_distinct{false}
, _allow_filtering{false}
{ }
select_statement::parameters::parameters(orderings_type orderings,
bool is_distinct,
bool allow_filtering)
: _orderings{std::move(orderings)}
, _is_distinct{is_distinct}
, _allow_filtering{allow_filtering}
{ }
bool select_statement::parameters::is_distinct() {
return _is_distinct;
}
bool select_statement::parameters::allow_filtering() {
return _allow_filtering;
}
select_statement::parameters::orderings_type const& select_statement::parameters::orderings() {
return _orderings;
}
select_statement::select_statement(schema_ptr schema,
uint32_t bound_terms,
::shared_ptr<parameters> parameters,
@@ -132,22 +106,6 @@ void select_statement::validate(distributed<service::storage_proxy>&, const serv
// Nothing to do, all validation has been done by raw_statemet::prepare()
}
bool select_statement::depends_on_keyspace(const sstring& ks_name) const {
return keyspace() == ks_name;
}
bool select_statement::depends_on_column_family(const sstring& cf_name) const {
return column_family() == cf_name;
}
const sstring& select_statement::keyspace() const {
return _schema->ks_name();
}
const sstring& select_statement::column_family() const {
return _schema->cf_name();
}
query::partition_slice
select_statement::make_partition_slice(const query_options& options) {
std::vector<column_id> static_columns;
@@ -194,7 +152,7 @@ int32_t select_statement::get_limit(const query_options& options) const {
try {
int32_type->validate(*val);
auto l = value_cast<int32_t>(int32_type->deserialize(*val));
auto l = boost::any_cast<int32_t>(int32_type->deserialize(*val));
if (l <= 0) {
throw exceptions::invalid_request_exception("LIMIT must be strictly positive");
}
@@ -218,8 +176,7 @@ select_statement::execute(distributed<service::storage_proxy>& proxy, service::q
int32_t limit = get_limit(options);
auto now = db_clock::now();
auto command = ::make_lw_shared<query::read_command>(_schema->id(), _schema->version(),
make_partition_slice(options), limit, to_gc_clock(now));
auto command = ::make_lw_shared<query::read_command>(_schema->id(), make_partition_slice(options), limit, to_gc_clock(now));
int32_t page_size = options.get_page_size();
@@ -230,51 +187,37 @@ select_statement::execute(distributed<service::storage_proxy>& proxy, service::q
page_size = DEFAULT_COUNT_PAGE_SIZE;
}
auto key_ranges = _restrictions->get_partition_key_ranges(options);
warn(unimplemented::cause::PAGING);
return execute(proxy, command, _restrictions->get_partition_key_ranges(options), state, options, now);
if (page_size <= 0
|| !service::pager::query_pagers::may_need_paging(page_size,
*command, key_ranges)) {
return execute(proxy, command, std::move(key_ranges), state, options,
now);
#if 0
if (page_size <= 0 || !command || !query_pagers::may_need_paging(command, page_size)) {
return execute(proxy, command, state, options, now);
}
auto p = service::pager::query_pagers::pager(_schema, _selection,
state, options, command, std::move(key_ranges));
auto pager = query_pagers::pager(command, cl, state.get_client_state(), options.get_paging_state());
if (_selection->is_aggregate()) {
return do_with(
cql3::selection::result_set_builder(*_selection, now,
options.get_serialization_format()),
[p, page_size, now](auto& builder) {
return do_until([p] {return p->is_exhausted();},
[p, &builder, page_size, now] {
return p->fetch_page(builder, page_size, now);
}
).then([&builder] {
auto rs = builder.build();
auto msg = ::make_shared<transport::messages::result_message::rows>(std::move(rs));
return make_ready_future<shared_ptr<transport::messages::result_message>>(std::move(msg));
});
});
if (selection->isAggregate()) {
return page_aggregate_query(pager, options, page_size, now);
}
// We can't properly do post-query ordering if we page (see #6722)
if (needs_post_query_ordering()) {
throw exceptions::invalid_request_exception(
"Cannot page queries with both ORDER BY and a IN restriction on the partition key;"
" you must either remove the ORDER BY or the IN and sort client side, or disable paging for this query");
"Cannot page queries with both ORDER BY and a IN restriction on the partition key;"
" you must either remove the ORDER BY or the IN and sort client side, or disable paging for this query");
}
return p->fetch_page(page_size, now).then(
[this, p, &options, limit, now](std::unique_ptr<cql3::result_set> rs) {
return pager->fetch_page(page_size).then([this, pager, &options, limit, now] (auto page) {
auto msg = process_results(page, options, limit, now);
if (!p->is_exhausted()) {
rs->get_metadata().set_has_more_pages(p->state());
}
if (!pager->is_exhausted()) {
msg->result->metadata->set_has_more_pages(pager->state());
}
auto msg = ::make_shared<transport::messages::result_message::rows>(std::move(rs));
return make_ready_future<shared_ptr<transport::messages::result_message>>(std::move(msg));
});
return msg;
});
#endif
}
future<shared_ptr<transport::messages::result_message>>
@@ -309,8 +252,7 @@ future<::shared_ptr<transport::messages::result_message>>
select_statement::execute_internal(distributed<service::storage_proxy>& proxy, service::query_state& state, const query_options& options) {
int32_t limit = get_limit(options);
auto now = db_clock::now();
auto command = ::make_lw_shared<query::read_command>(_schema->id(), _schema->version(),
make_partition_slice(options), limit);
auto command = ::make_lw_shared<query::read_command>(_schema->id(), make_partition_slice(options), limit);
auto partition_ranges = _restrictions->get_partition_key_ranges(options);
if (needs_post_query_ordering() && _limit) {
@@ -331,18 +273,114 @@ select_statement::execute_internal(distributed<service::storage_proxy>& proxy, s
}
}
shared_ptr<transport::messages::result_message> select_statement::process_results(
foreign_ptr<lw_shared_ptr<query::result>> results,
lw_shared_ptr<query::read_command> cmd, const query_options& options,
db_clock::time_point now) {
// Implements ResultVisitor concept from query.hh
class result_set_building_visitor {
cql3::selection::result_set_builder& builder;
select_statement& stmt;
uint32_t _row_count;
std::vector<bytes> _partition_key;
std::vector<bytes> _clustering_key;
public:
result_set_building_visitor(cql3::selection::result_set_builder& builder, select_statement& stmt)
: builder(builder)
, stmt(stmt)
, _row_count(0)
{ }
void add_value(const column_definition& def, query::result_row_view::iterator_type& i) {
if (def.type->is_multi_cell()) {
auto cell = i.next_collection_cell();
if (!cell) {
builder.add_empty();
return;
}
builder.add(def, *cell);
} else {
auto cell = i.next_atomic_cell();
if (!cell) {
builder.add_empty();
return;
}
builder.add(def, *cell);
}
};
void accept_new_partition(const partition_key& key, uint32_t row_count) {
_partition_key = key.explode(*stmt._schema);
_row_count = row_count;
}
void accept_new_partition(uint32_t row_count) {
_row_count = row_count;
}
void accept_new_row(const clustering_key& key, const query::result_row_view& static_row,
const query::result_row_view& row) {
_clustering_key = key.explode(*stmt._schema);
accept_new_row(static_row, row);
}
void accept_new_row(const query::result_row_view& static_row, const query::result_row_view& row) {
auto static_row_iterator = static_row.iterator();
auto row_iterator = row.iterator();
builder.new_row();
for (auto&& def : stmt._selection->get_columns()) {
switch (def->kind) {
case column_kind::partition_key:
builder.add(_partition_key[def->component_index()]);
break;
case column_kind::clustering_key:
builder.add(_clustering_key[def->component_index()]);
break;
case column_kind::regular_column:
add_value(*def, row_iterator);
break;
case column_kind::compact_column:
add_value(*def, row_iterator);
break;
case column_kind::static_column:
add_value(*def, static_row_iterator);
break;
default:
assert(0);
}
}
}
void accept_partition_end(const query::result_row_view& static_row) {
if (_row_count == 0) {
builder.new_row();
auto static_row_iterator = static_row.iterator();
for (auto&& def : stmt._selection->get_columns()) {
if (def->is_partition_key()) {
builder.add(_partition_key[def->component_index()]);
} else if (def->is_static()) {
add_value(*def, static_row_iterator);
} else {
builder.add_empty();
}
}
}
}
};
shared_ptr<transport::messages::result_message>
select_statement::process_results(foreign_ptr<lw_shared_ptr<query::result>> results, lw_shared_ptr<query::read_command> cmd,
const query_options& options, db_clock::time_point now) {
cql3::selection::result_set_builder builder(*_selection, now, options.get_serialization_format());
// FIXME: This special casing saves us the cost of copying an already
// linearized response. When we switch views to scattered_reader this will go away.
if (results->buf().is_linearized()) {
query::result_view view(results->buf().view());
view.consume(cmd->slice, result_set_building_visitor(builder, *this));
} else {
bytes_ostream w(results->buf());
query::result_view view(w.linearize());
view.consume(cmd->slice, result_set_building_visitor(builder, *this));
}
cql3::selection::result_set_builder builder(*_selection, now,
options.get_serialization_format());
query::result_view::consume(results->buf(), cmd->slice,
cql3::selection::result_set_builder::visitor(builder, *_schema,
*_selection));
auto rs = builder.build();
if (needs_post_query_ordering()) {
rs->sort(_ordering_comparator);
if (_is_reversed) {
@@ -353,18 +391,6 @@ shared_ptr<transport::messages::result_message> select_statement::process_result
return ::make_shared<transport::messages::result_message::rows>(std::move(rs));
}
select_statement::raw_statement::raw_statement(::shared_ptr<cf_name> cf_name,
::shared_ptr<parameters> parameters,
std::vector<::shared_ptr<selection::raw_selector>> select_clause,
std::vector<::shared_ptr<relation>> where_clause,
::shared_ptr<term::raw> limit)
: cf_statement(std::move(cf_name))
, _parameters(std::move(parameters))
, _select_clause(std::move(select_clause))
, _where_clause(std::move(where_clause))
, _limit(std::move(limit))
{ }
::shared_ptr<parsed_statement::prepared>
select_statement::raw_statement::prepare(database& db) {
schema_ptr schema = validation::validate_column_family(db, keyspace(), column_family());

View File

@@ -63,6 +63,7 @@ namespace statements {
*
*/
class select_statement : public cql_statement {
friend class result_set_building_visitor;
public:
class parameters final {
public:
@@ -72,13 +73,20 @@ public:
const bool _is_distinct;
const bool _allow_filtering;
public:
parameters();
parameters()
: _is_distinct{false}
, _allow_filtering{false}
{ }
parameters(orderings_type orderings,
bool is_distinct,
bool allow_filtering);
bool is_distinct();
bool allow_filtering();
orderings_type const& orderings();
bool allow_filtering)
: _orderings{std::move(orderings)}
, _is_distinct{is_distinct}
, _allow_filtering{allow_filtering}
{ }
bool is_distinct() { return _is_distinct; }
bool allow_filtering() { return _allow_filtering; }
orderings_type const& orderings() { return _orderings; }
};
private:
static constexpr int DEFAULT_COUNT_PAGE_SIZE = 10000;
@@ -124,8 +132,6 @@ public:
virtual uint32_t get_bound_terms() override;
virtual void check_access(const service::client_state& state) override;
virtual void validate(distributed<service::storage_proxy>&, const service::client_state& state) override;
virtual bool depends_on_keyspace(const sstring& ks_name) const;
virtual bool depends_on_column_family(const sstring& cf_name) const;
virtual future<::shared_ptr<transport::messages::result_message>> execute(distributed<service::storage_proxy>& proxy,
service::query_state& state, const query_options& options) override;
@@ -186,12 +192,18 @@ public:
QueryOptions options = QueryOptions.DEFAULT;
return process(rows, options, getLimit(options), System.currentTimeMillis());
}
public String keyspace()
{
return _schema.ks_name;
}
public String columnFamily()
{
return _schema.cfName;
}
#endif
const sstring& keyspace() const;
const sstring& column_family() const;
query::partition_slice make_partition_slice(const query_options& options);
#if 0
@@ -446,7 +458,13 @@ public:
::shared_ptr<parameters> parameters,
std::vector<::shared_ptr<selection::raw_selector>> select_clause,
std::vector<::shared_ptr<relation>> where_clause,
::shared_ptr<term::raw> limit);
::shared_ptr<term::raw> limit)
: cf_statement(std::move(cf_name))
, _parameters(std::move(parameters))
, _select_clause(std::move(select_clause))
, _where_clause(std::move(where_clause))
, _limit(std::move(limit))
{ }
virtual ::shared_ptr<prepared> prepare(database& db) override;
private:

Some files were not shown because too many files have changed in this diff Show More