mirror of
https://github.com/scylladb/scylladb.git
synced 2026-04-21 17:10:35 +00:00
treewide: fix spelling errors.
Fix various spelling errors. Closes scylladb/scylladb#29574
This commit is contained in:
committed by
Avi Kivity
parent
cb8253067d
commit
74b523ea20
@@ -1892,7 +1892,7 @@ future<executor::request_return_type> executor::update_table(client_state& clien
|
||||
}
|
||||
if (vector_index_updates->Size() > 1) {
|
||||
// VectorIndexUpdates mirrors GlobalSecondaryIndexUpdates.
|
||||
// Since DynamoDB artifically limits the latter to just a
|
||||
// Since DynamoDB artificially limits the latter to just a
|
||||
// single operation (one Create or one Delete), we also
|
||||
// place the same artificial limit on VectorIndexUpdates,
|
||||
// and throw the same LimitExceeded error if the client
|
||||
|
||||
@@ -1354,7 +1354,7 @@ static future<executor::request_return_type> query_vector(
|
||||
std::unordered_set<std::string> used_attribute_values;
|
||||
// Parse the Select parameter and determine which attributes to return.
|
||||
// For a vector index, the default Select is ALL_ATTRIBUTES (full items).
|
||||
// ALL_PROJECTED_ATTRIBUTES is significantly more efficent because it
|
||||
// ALL_PROJECTED_ATTRIBUTES is significantly more efficient because it
|
||||
// returns what the vector store returned without looking up additional
|
||||
// base-table data. Currently only the primary key attributes are projected
|
||||
// but in the future we'll implement projecting additional attributes into
|
||||
|
||||
@@ -582,7 +582,7 @@ void stream_id_range::prepare_for_iterating()
|
||||
// the function returns `stream_id_range` that will allow iteration over children Streams shards for the Streams shard `parent`
|
||||
// a child Streams shard is defined as a Streams shard that touches a token range that was previously covered by the `parent` Streams shard
|
||||
// a Streams shard contains a token that represents the end of the token range for that Streams shard (inclusive)
|
||||
// begginning of the token range is defined by previous Streams shard's token + 1
|
||||
// beginning of the token range is defined by previous Streams shard's token + 1
|
||||
// NOTE: With vnodes, ranges of Streams' shards wrap, while with tablets the biggest allowed token number is always a range end.
|
||||
// NOTE: both streams generations are guaranteed to cover the whole range and be non-empty
|
||||
// NOTE: it's possible to get more than one stream shard with the same token value (thus some of those stream shards will be empty) -
|
||||
|
||||
@@ -1625,7 +1625,7 @@ struct process_change_visitor {
|
||||
if (_enable_updating_state) {
|
||||
if (_request_options.alternator && _alternator_schema_has_no_clustering_key && _clustering_row_states.empty()) {
|
||||
// Alternator's table can be with or without a clustering key. If the clustering key exists,
|
||||
// delete request will be `clustered_row_delete` and will be hanlded there.
|
||||
// delete request will be `clustered_row_delete` and will be handled there.
|
||||
// If the clustering key doesn't exist, delete request will be `partition_delete` and will be handled here.
|
||||
// The no-clustering-key case is slightly tricky, because insert of such item is handled by `clustered_row_cells`
|
||||
// and has some value as clustering_key (the value currently seems to be empty bytes object).
|
||||
@@ -1933,7 +1933,7 @@ public:
|
||||
if (_options.alternator && !_alternator_clustering_keys_to_ignore.empty()) {
|
||||
// we filter mutations for Alternator's changes here.
|
||||
// We do it per mutation object (user might submit a batch of those in one go
|
||||
// and some might be splitted because of different timestamps),
|
||||
// and some might be split because of different timestamps),
|
||||
// ignore key set is cleared afterwards.
|
||||
// If a single mutation object contains two separate changes to the same row
|
||||
// and at least one of them is ignored, all of them will be ignored.
|
||||
|
||||
@@ -277,7 +277,7 @@ filter_for_query(consistency_level cl,
|
||||
|
||||
host_id_vector_replica_set selected_endpoints;
|
||||
|
||||
// Pre-select endpoints based on client preference. If the endpoints
|
||||
// Preselect endpoints based on client preference. If the endpoints
|
||||
// selected this way aren't enough to satisfy CL requirements select the
|
||||
// remaining ones according to the load-balancing strategy as before.
|
||||
if (!preferred_endpoints.empty()) {
|
||||
|
||||
@@ -271,7 +271,7 @@ The json structure is as follows:
|
||||
}
|
||||
|
||||
The `manifest` member contains the following attributes:
|
||||
- `version` - respresenting the version of the manifest itself. It is incremented when members are added or removed from the manifest.
|
||||
- `version` - representing the version of the manifest itself. It is incremented when members are added or removed from the manifest.
|
||||
- `scope` - the scope of metadata stored in this manifest file. The following scopes are supported:
|
||||
- `node` - the manifest describes all SSTables owned by this node in this snapshot.
|
||||
|
||||
|
||||
2
main.cc
2
main.cc
@@ -942,7 +942,7 @@ To start the scylla server proper, simply invoke as: scylla server (or just scyl
|
||||
|
||||
auto background_reclaim_scheduling_group = create_scheduling_group("background_reclaim", "bgre", 50).get();
|
||||
|
||||
// Maintenance supergroup -- the collection of background low-prio activites
|
||||
// Maintenance supergroup -- the collection of background low-prio activities
|
||||
auto maintenance_supergroup = create_scheduling_supergroup(200).get();
|
||||
auto bandwidth_updater = io_throughput_updater("maintenance supergroup", maintenance_supergroup,
|
||||
cfg->maintenance_io_throughput_mb_per_sec.is_set() ? cfg->maintenance_io_throughput_mb_per_sec : cfg->stream_io_throughput_mb_per_sec);
|
||||
|
||||
@@ -775,7 +775,7 @@ inline estimated_histogram estimated_histogram_merge(estimated_histogram a, cons
|
||||
/**
|
||||
* bytes_histogram is an estimated histogram for byte values.
|
||||
* It covers the range of 1KB to 1GB with exponential (power-of-2) buckets.
|
||||
* Min backet is set to 512 bytes so the bucket upper limit will be 1024B.
|
||||
* Min bucket is set to 512 bytes so the bucket upper limit will be 1024B.
|
||||
*/
|
||||
using bytes_histogram = approx_exponential_histogram<512, 1024*1024*1024, 1>;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user