codebase wide: use try_emplace when appropriate
C++17 introduced try_emplace for maps to replace the following pattern:
if(element not in a map) {
map.emplace(...)
}
try_emplace is more efficient and results in more concise code.
This commit introduces usage of try_emplace when it's appropriate.
Tests: unit(dev)
Signed-off-by: Piotr Jastrzebski <piotr@scylladb.com>
Message-Id: <4970091ed770e233884633bf6d46111369e7d2dd.1597327358.git.piotr@scylladb.com>
This commit is contained in:
committed by
Avi Kivity
parent
39400f58fb
commit
01ea159fde
@@ -1748,12 +1748,8 @@ static future<> do_batch_write(service::storage_proxy& proxy,
|
||||
key_builders(1, schema_decorated_key_hash{}, schema_decorated_key_equal{});
|
||||
for (auto& b : mutation_builders) {
|
||||
auto dk = dht::decorate_key(*b.first, b.second.pk());
|
||||
auto it = key_builders.find({b.first, dk});
|
||||
if (it == key_builders.end()) {
|
||||
key_builders.emplace(schema_decorated_key{b.first, dk}, std::vector<put_or_delete_item>{std::move(b.second)});
|
||||
} else {
|
||||
it->second.push_back(std::move(b.second));
|
||||
}
|
||||
auto [it, added] = key_builders.try_emplace(schema_decorated_key{b.first, dk});
|
||||
it->second.push_back(std::move(b.second));
|
||||
}
|
||||
return parallel_for_each(std::move(key_builders), [&proxy, &client_state, &stats, trace_state, ssg, permit = std::move(permit)] (auto& e) {
|
||||
stats.write_using_lwt++;
|
||||
|
||||
@@ -50,17 +50,15 @@ property_definitions::property_definitions()
|
||||
{ }
|
||||
|
||||
void property_definitions::add_property(const sstring& name, sstring value) {
|
||||
if (_properties.contains(name)) {
|
||||
if (auto [ignored, added] = _properties.try_emplace(name, value); !added) {
|
||||
throw exceptions::syntax_exception(format("Multiple definition for property '{}'", name));
|
||||
}
|
||||
_properties.emplace(name, value);
|
||||
}
|
||||
|
||||
void property_definitions::add_property(const sstring& name, const std::map<sstring, sstring>& value) {
|
||||
if (_properties.contains(name)) {
|
||||
if (auto [ignored, added] = _properties.try_emplace(name, value); !added) {
|
||||
throw exceptions::syntax_exception(format("Multiple definition for property '{}'", name));
|
||||
}
|
||||
_properties.emplace(name, value);
|
||||
}
|
||||
|
||||
void property_definitions::validate(const std::set<sstring>& keywords, const std::set<sstring>& exts, const std::set<sstring>& obsolete) const {
|
||||
|
||||
@@ -678,10 +678,9 @@ database::shard_of(const frozen_mutation& m) {
|
||||
}
|
||||
|
||||
void database::add_keyspace(sstring name, keyspace k) {
|
||||
if (_keyspaces.contains(name)) {
|
||||
if (auto [ignored, added] = _keyspaces.try_emplace(std::move(name), std::move(k)); !added) {
|
||||
throw std::invalid_argument("Keyspace " + name + " already exists");
|
||||
}
|
||||
_keyspaces.emplace(std::move(name), std::move(k));
|
||||
}
|
||||
|
||||
future<> database::update_keyspace(const sstring& name) {
|
||||
|
||||
@@ -296,10 +296,7 @@ future<> db::commitlog_replayer::impl::process(stats* s, commitlog::buffer_and_r
|
||||
// lower than anything the new session will produce.
|
||||
if (cf.schema()->version() != fm.schema_version()) {
|
||||
auto& local_cm = _column_mappings.local().map;
|
||||
auto cm_it = local_cm.find(fm.schema_version());
|
||||
if (cm_it == local_cm.end()) {
|
||||
cm_it = local_cm.emplace(fm.schema_version(), src_cm).first;
|
||||
}
|
||||
auto cm_it = local_cm.try_emplace(fm.schema_version(), src_cm).first;
|
||||
const column_mapping& cm = cm_it->second;
|
||||
mutation m(cf.schema(), fm.decorated_key(*cf.schema()));
|
||||
converting_mutation_partition_applier v(cm, *cf.schema(), m.partition());
|
||||
|
||||
@@ -148,15 +148,13 @@ snapshot_ctl::get_snapshot_details() {
|
||||
public:
|
||||
future<> operator()(const snapshot_map& value) {
|
||||
for (auto&& vp: value) {
|
||||
if (!_result.contains(vp.first)) {
|
||||
_result.emplace(vp.first, std::move(vp.second));
|
||||
if (auto [ignored, added] = _result.try_emplace(vp.first, std::move(vp.second)); added) {
|
||||
continue;
|
||||
}
|
||||
|
||||
auto& rp = _result.at(vp.first);
|
||||
for (auto&& cf: vp.second) {
|
||||
if (!rp.contains(cf.first)) {
|
||||
rp.emplace(cf.first, std::move(cf.second));
|
||||
if (auto [ignored, added] = rp.try_emplace(cf.first, std::move(cf.second)); added) {
|
||||
continue;
|
||||
}
|
||||
auto& rcf = rp.at(cf.first);
|
||||
@@ -177,10 +175,8 @@ snapshot_ctl::get_snapshot_details() {
|
||||
return parallel_for_each(db.get_column_families(), [local_snapshots] (auto& cf_pair) {
|
||||
return cf_pair.second->get_snapshot_details().then([uuid = cf_pair.first, local_snapshots] (auto map) {
|
||||
for (auto&& snap_map: map) {
|
||||
if (!local_snapshots->contains(snap_map.first)) {
|
||||
local_snapshots->emplace(snap_map.first, cf_snapshot_map());
|
||||
}
|
||||
local_snapshots->at(snap_map.first).emplace(uuid, snap_map.second);
|
||||
auto [it, ignored] = local_snapshots->try_emplace(snap_map.first);
|
||||
it->second.emplace(uuid, snap_map.second);
|
||||
}
|
||||
return make_ready_future<>();
|
||||
});
|
||||
|
||||
@@ -65,11 +65,7 @@ row_locker::lock_holder::lock_holder(row_locker* locker, const dht::decorated_ke
|
||||
future<row_locker::lock_holder>
|
||||
row_locker::lock_pk(const dht::decorated_key& pk, bool exclusive, db::timeout_clock::time_point timeout, stats& stats) {
|
||||
mylog.debug("taking {} lock on entire partition {}", (exclusive ? "exclusive" : "shared"), pk);
|
||||
auto i = _two_level_locks.find(pk);
|
||||
if (i == _two_level_locks.end()) {
|
||||
// Lock doesn't exist, we need to create it first
|
||||
i = _two_level_locks.emplace(pk, this).first;
|
||||
}
|
||||
auto i = _two_level_locks.try_emplace(pk, this).first;
|
||||
single_lock_stats &single_lock_stats = exclusive ? stats.exclusive_partition : stats.shared_partition;
|
||||
single_lock_stats.operations_currently_waiting_for_lock++;
|
||||
utils::latency_counter waiting_latency;
|
||||
@@ -90,11 +86,7 @@ row_locker::lock_pk(const dht::decorated_key& pk, bool exclusive, db::timeout_cl
|
||||
future<row_locker::lock_holder>
|
||||
row_locker::lock_ck(const dht::decorated_key& pk, const clustering_key_prefix& cpk, bool exclusive, db::timeout_clock::time_point timeout, stats& stats) {
|
||||
mylog.debug("taking shared lock on partition {}, and {} lock on row {} in it", pk, (exclusive ? "exclusive" : "shared"), cpk);
|
||||
auto i = _two_level_locks.find(pk);
|
||||
if (i == _two_level_locks.end()) {
|
||||
// Not yet locked, we need to create the lock. This makes a copy of pk.
|
||||
i = _two_level_locks.emplace(pk, this).first;
|
||||
}
|
||||
auto i = _two_level_locks.try_emplace(pk, this).first;
|
||||
future<lock_type::holder> lock_partition = i->second._partition_lock.hold_read_lock(timeout);
|
||||
auto j = i->second._row_locks.find(cpk);
|
||||
if (j == i->second._row_locks.end()) {
|
||||
|
||||
@@ -189,10 +189,7 @@ public:
|
||||
};
|
||||
|
||||
future<> timestamp_based_splitting_mutation_writer::write_to_bucket(bucket_id bucket, mutation_fragment&& mf) {
|
||||
auto it = _buckets.find(bucket);
|
||||
if (it == _buckets.end()) {
|
||||
std::tie(it, std::ignore) = _buckets.emplace(bucket, bucket_writer(_schema, _consumer));
|
||||
}
|
||||
auto it = _buckets.try_emplace(bucket, _schema, _consumer).first;
|
||||
|
||||
auto& writer = it->second;
|
||||
|
||||
|
||||
@@ -1742,12 +1742,11 @@ void storage_proxy_stats::split_stats::register_metrics_for(gms::inet_address ep
|
||||
sstring dc = get_dc(ep);
|
||||
// if this is the first time we see an endpoint from this DC - add a
|
||||
// corresponding collectd metric
|
||||
if (!_dc_stats.contains(dc)) {
|
||||
if (auto [ignored, added] = _dc_stats.try_emplace(dc); added) {
|
||||
_metrics.add_group(_category, {
|
||||
sm::make_derive(_short_description_prefix + sstring("_remote_node"), [this, dc] { return _dc_stats[dc].val; },
|
||||
sm::description(seastar::format("{} when communicating with external Nodes in DC {}", _long_description_prefix, dc)), {storage_proxy_stats::current_scheduling_group_label(), datacenter_label(dc), op_type_label(_op_type)})
|
||||
});
|
||||
_dc_stats.emplace(dc, stats_counter{});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3305,15 +3304,12 @@ public:
|
||||
auto diff = v.par
|
||||
? m.partition().difference(schema, v.par->mut().unfreeze(schema).partition())
|
||||
: mutation_partition(*schema, m.partition());
|
||||
auto it = _diffs[m.token()].find(v.from);
|
||||
std::optional<mutation> mdiff;
|
||||
if (!diff.empty()) {
|
||||
has_diff = true;
|
||||
mdiff = mutation(schema, m.decorated_key(), std::move(diff));
|
||||
}
|
||||
if (it == _diffs[m.token()].end()) {
|
||||
_diffs[m.token()].emplace(v.from, std::move(mdiff));
|
||||
} else {
|
||||
if (auto [it, added] = _diffs[m.token()].try_emplace(v.from, std::move(mdiff)); !added) {
|
||||
// should not really happen, but lets try to deal with it
|
||||
if (mdiff) {
|
||||
if (it->second) {
|
||||
|
||||
@@ -1993,12 +1993,7 @@ future<std::unordered_map<sstring, std::vector<sstring>>> storage_service::descr
|
||||
});
|
||||
}, std::move(results), [] (auto results, auto host_and_version) {
|
||||
auto version = host_and_version.second ? host_and_version.second->to_sstring() : UNREACHABLE;
|
||||
auto it = results.find(version);
|
||||
if (it == results.end()) {
|
||||
results.emplace(std::move(version), std::vector<sstring> { host_and_version.first.to_sstring() });
|
||||
} else {
|
||||
it->second.emplace_back(host_and_version.first.to_sstring());
|
||||
}
|
||||
results.try_emplace(version).first->second.emplace_back(host_and_version.first.to_sstring());
|
||||
return results;
|
||||
}).then([] (auto results) {
|
||||
// we're done: the results map is ready to return to the client. the rest is just debug logging:
|
||||
|
||||
Reference in New Issue
Block a user