build: disable implicit fallthrough

Prevent switch case statements from falling through without an annotation
([[fallthrough]]) proving that the fall-through was intended.

Existing intended cases were annotated.

Closes #14607
This commit is contained in:
Avi Kivity
2023-07-10 16:58:54 +03:00
committed by Tomasz Grabiec
parent d645e7a515
commit 0cabf4eeb9
12 changed files with 77 additions and 3 deletions

View File

@@ -428,6 +428,7 @@ void time_window_compaction_strategy::update_estimated_compaction_by_tasks(time_
break;
case bucket_compaction_mode::major:
n++;
break;
default:
break;
}

View File

@@ -1415,6 +1415,7 @@ wasm_deps['wasm/test_word_double.wat'] = 'test/resource/wasm/c/test_word_double.
warnings = [
'-Wall',
'-Werror',
'-Wimplicit-fallthrough',
'-Wno-mismatched-tags', # clang-only
'-Wno-tautological-compare',
'-Wno-c++11-narrowing',

View File

@@ -86,6 +86,7 @@ size_t block_for_each_quorum(const locator::effective_replication_map& erm) {
size_t block_for(const locator::effective_replication_map& erm, consistency_level cl) {
switch (cl) {
case consistency_level::ONE:
[[fallthrough]];
case consistency_level::LOCAL_ONE:
return 1;
case consistency_level::ANY:
@@ -95,11 +96,13 @@ size_t block_for(const locator::effective_replication_map& erm, consistency_leve
case consistency_level::THREE:
return 3;
case consistency_level::QUORUM:
[[fallthrough]];
case consistency_level::SERIAL:
return quorum_for(erm);
case consistency_level::ALL:
return erm.get_replication_factor();
case consistency_level::LOCAL_QUORUM:
[[fallthrough]];
case consistency_level::LOCAL_SERIAL:
return block_for_local_serial(erm);
case consistency_level::EACH_QUORUM:
@@ -216,6 +219,7 @@ void assure_sufficient_live_nodes(
break;
}
// Fallthrough on purpose for SimpleStrategy
[[fallthrough]];
default:
size_t live = live_endpoints.size();
size_t pending = pending_endpoints.size();
@@ -373,6 +377,7 @@ is_sufficient_live_nodes(consistency_level cl,
return true;
}
}
[[fallthrough]];
// Fallthrough on purpose for SimpleStrategy
default:
return live_endpoints.size() >= block_for(erm, cl);
@@ -393,6 +398,7 @@ void validate_for_read(consistency_level cl) {
void validate_for_write(consistency_level cl) {
switch (cl) {
case consistency_level::SERIAL:
[[fallthrough]];
case consistency_level::LOCAL_SERIAL:
throw exceptions::invalid_request_exception("You must use conditional updates for serializable writes");
default:
@@ -404,6 +410,7 @@ void validate_for_write(consistency_level cl) {
void validate_for_cas_learn(consistency_level cl, const sstring& keyspace) {
switch (cl) {
case consistency_level::SERIAL:
[[fallthrough]];
case consistency_level::LOCAL_SERIAL:
throw exceptions::invalid_request_exception(format("{} is not supported as conditional update commit consistency. Use ANY if you mean \"make sure it is accepted but I don't care how many replicas commit it for non-SERIAL reads\"", cl));
default:

View File

@@ -409,6 +409,7 @@ schema::schema(private_tag, const raw_schema& raw, std::optional<raw_view_info>
def._thrift_bits.is_on_all_components = true;
break;
}
[[fallthrough]];
default:
// Or any other column where "comparator" is not compound
def._thrift_bits.is_on_all_components = !thrift().has_compound_comparator();

View File

@@ -273,6 +273,7 @@ future<group0_guard> raft_group0_client::start_operation(seastar::abort_source*
case group0_upgrade_state::recovery:
logger.warn("starting operation in RECOVERY mode (using old procedures)");
[[fallthrough]];
case group0_upgrade_state::use_pre_raft_procedures:
co_return group0_guard {
std::make_unique<group0_guard::impl>(

View File

@@ -385,7 +385,9 @@ future<> storage_service::topology_state_load(cdc::generation_service& cdc_gen_s
}
switch (*state) {
case topology::transition_state::commit_cdc_generation:
[[fallthrough]];
case topology::transition_state::publish_cdc_generation:
[[fallthrough]];
case topology::transition_state::write_both_read_old:
return read_new_t::no;
case topology::transition_state::write_both_read_new:
@@ -1322,6 +1324,7 @@ class topology_coordinator {
break;
case node_state::removing:
co_await remove_from_group0(node.id);
[[fallthrough]];
case node_state::decommissioning: {
topology_mutation_builder builder(node.guard.write_timestamp());
auto next_state = node.rs->state == node_state::decommissioning

View File

@@ -193,18 +193,21 @@ public:
_state = state::KEY_BYTES;
break;
}
[[fallthrough]];
case state::KEY_BYTES:
sstlog.trace("{}: pos {} state {} - size={}", fmt::ptr(this), current_pos(), state::KEY_BYTES, this->_u16);
if (this->read_bytes_contiguous(data, this->_u16, _key) != continuous_data_consumer::read_status::ready) {
_state = state::POSITION;
break;
}
[[fallthrough]];
case state::POSITION:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::POSITION);
if (read_vint_or_uint64(data) != continuous_data_consumer::read_status::ready) {
_state = state::PROMOTED_SIZE;
break;
}
[[fallthrough]];
case state::PROMOTED_SIZE:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::PROMOTED_SIZE);
_position = this->_u64;
@@ -212,6 +215,7 @@ public:
_state = state::PARTITION_HEADER_LENGTH_1;
break;
}
[[fallthrough]];
case state::PARTITION_HEADER_LENGTH_1: {
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::PARTITION_HEADER_LENGTH_1);
auto promoted_index_size_with_header = get_uint32();
@@ -230,10 +234,12 @@ public:
break;
}
}
[[fallthrough]];
case state::PARTITION_HEADER_LENGTH_2:
sstlog.trace("{}: pos {} state {} {}", fmt::ptr(this), current_pos(), state::PARTITION_HEADER_LENGTH_2, this->_u64);
_partition_header_length = this->_u64;
state_LOCAL_DELETION_TIME:
[[fallthrough]];
case state::LOCAL_DELETION_TIME:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::LOCAL_DELETION_TIME);
_deletion_time.emplace();
@@ -241,6 +247,7 @@ public:
_state = state::MARKED_FOR_DELETE_AT;
break;
}
[[fallthrough]];
case state::MARKED_FOR_DELETE_AT:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::MARKED_FOR_DELETE_AT);
_deletion_time->local_deletion_time = this->_u32;
@@ -248,6 +255,7 @@ public:
_state = state::NUM_PROMOTED_INDEX_BLOCKS;
break;
}
[[fallthrough]];
case state::NUM_PROMOTED_INDEX_BLOCKS:
sstlog.trace("{}: pos {} state {}", fmt::ptr(this), current_pos(), state::NUM_PROMOTED_INDEX_BLOCKS);
_deletion_time->marked_for_delete_at = this->_u64;
@@ -256,6 +264,7 @@ public:
break;
}
state_CONSUME_ENTRY:
[[fallthrough]];
case state::CONSUME_ENTRY: {
auto promoted_index_start = current_pos();
auto promoted_index_size = _promoted_index_end - promoted_index_start;

View File

@@ -123,6 +123,7 @@ public:
_state = state::CK_KIND;
return read_status::waiting;
}
[[fallthrough]];
case state::CK_KIND:
kind = bound_kind_m{_primitive._u8};
if (kind == bound_kind_m::clustering) {
@@ -133,10 +134,12 @@ public:
_state = state::CK_SIZE;
return read_status::waiting;
}
[[fallthrough]];
case state::CK_SIZE:
if (_primitive._u16 < _s.clustering_key_size()) {
ck_range.drop_back(_s.clustering_key_size() - _primitive._u16);
}
[[fallthrough]];
case state::CK_BLOCK:
ck_block_label:
if (no_more_ck_blocks()) {
@@ -152,8 +155,10 @@ public:
_state = state::CK_BLOCK_HEADER;
return read_status::waiting;
}
[[fallthrough]];
case state::CK_BLOCK_HEADER:
ck_blocks_header = _primitive._u64;
[[fallthrough]];
case state::CK_BLOCK2:
ck_block2_label:
{
@@ -177,6 +182,7 @@ public:
return read_status::waiting;
}
}
[[fallthrough]];
case state::CK_BLOCK_END:
clustering_key_values.push_back(std::move(column_value));
move_to_next_ck_block();
@@ -254,25 +260,27 @@ public:
_start_pos = _clustering.get_and_reset();
_clustering.set_parsing_start_key(false);
_state = state::END;
// fall-through
[[fallthrough]];
case state::END:
if (_clustering.consume(data) == read_status::waiting) {
return read_status::waiting;
}
_end_pos = _clustering.get_and_reset();
_state = state::OFFSET;
// fall-through
[[fallthrough]];
case state::OFFSET:
if (_primitive.read_unsigned_vint(data) != read_status::ready) {
_state = state::WIDTH;
return read_status::waiting;
}
[[fallthrough]];
case state::WIDTH:
_offset = _primitive._u64;
if (_primitive.read_signed_vint(data) != read_status::ready) {
_state = state::END_OPEN_MARKER_FLAG;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_FLAG:
assert(_primitive._i64 + width_base > 0);
_width = (_primitive._i64 + width_base);
@@ -280,6 +288,7 @@ public:
_state = state::END_OPEN_MARKER_LOCAL_DELETION_TIME;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_LOCAL_DELETION_TIME:
if (_primitive._u8 == 0) {
_state = state::DONE;
@@ -290,12 +299,14 @@ public:
_state = state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_1;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_1:
_end_open_marker->local_deletion_time = _primitive._u32;
if (_primitive.read_64(data) != read_status::ready) {
_state = state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_2;
return read_status::waiting;
}
[[fallthrough]];
case state::END_OPEN_MARKER_MARKED_FOR_DELETE_AT_2:
_end_open_marker->marked_for_delete_at = _primitive._u64;
_state = state::DONE;

View File

@@ -90,32 +90,38 @@ private:
ctx.state = state_k_l::START_NAME_BYTES;
return;
}
[[fallthrough]];
case state_k_l::START_NAME_BYTES:
if (this->read_bytes_contiguous(data, this->_u16, ctx.start) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::END_NAME_LENGTH;
return;
}
[[fallthrough]];
case state_k_l::END_NAME_LENGTH:
if (this->read_16(data) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::END_NAME_BYTES;
return;
}
[[fallthrough]];
case state_k_l::END_NAME_BYTES:
if (this->read_bytes_contiguous(data, this->_u16, ctx.end) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::OFFSET;
return;
}
[[fallthrough]];
case state_k_l::OFFSET:
if (this->read_64(data) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::WIDTH;
return;
}
[[fallthrough]];
case state_k_l::WIDTH:
ctx.offset = this->_u64;
if (this->read_64(data) != continuous_data_consumer::read_status::ready) {
ctx.state = state_k_l::ADD_BLOCK;
return;
}
[[fallthrough]];
case state_k_l::ADD_BLOCK:
ctx.width = this->_u64;
ctx.state = state_k_l::START_NAME_LENGTH;

View File

@@ -68,7 +68,7 @@ public:
_state = 1;
break;
}
// fall-through
[[fallthrough]];
case 1:
check(_u64);
++_count;

View File

@@ -106,16 +106,22 @@ uint64_t hash2_64(bytes_view key, uint64_t seed)
break;
case 7:
h64 ^= (uint64_t) key[length - rem + 6] << 48;
[[fallthrough]];
case 6:
h64 ^= (uint64_t) key[length - rem + 5] << 40;
[[fallthrough]];
case 5:
h64 ^= (uint64_t) key[length - rem + 4] << 32;
[[fallthrough]];
case 4:
h64 ^= (uint64_t) key[length - rem + 3] << 24;
[[fallthrough]];
case 3:
h64 ^= (uint64_t) key[length - rem + 2] << 16;
[[fallthrough]];
case 2:
h64 ^= (uint64_t) key[length - rem + 1] << 8;
[[fallthrough]];
case 1:
h64 ^= (uint64_t) key[length - rem];
h64 *= m64;
@@ -182,20 +188,34 @@ void hash3_x64_128(bytes_view key, uint64_t seed, std::array<uint64_t,2> &result
switch (length & 15)
{
case 15: k2 ^= ((uint64_t) key[14]) << 48;
[[fallthrough]];
case 14: k2 ^= ((uint64_t) key[13]) << 40;
[[fallthrough]];
case 13: k2 ^= ((uint64_t) key[12]) << 32;
[[fallthrough]];
case 12: k2 ^= ((uint64_t) key[11]) << 24;
[[fallthrough]];
case 11: k2 ^= ((uint64_t) key[10]) << 16;
[[fallthrough]];
case 10: k2 ^= ((uint64_t) key[9]) << 8;
[[fallthrough]];
case 9: k2 ^= ((uint64_t) key[8]) << 0;
k2 *= c2; k2 = rotl64(k2,33); k2 *= c1; h2 ^= k2;
[[fallthrough]];
case 8: k1 ^= ((uint64_t) key[7]) << 56;
[[fallthrough]];
case 7: k1 ^= ((uint64_t) key[6]) << 48;
[[fallthrough]];
case 6: k1 ^= ((uint64_t) key[5]) << 40;
[[fallthrough]];
case 5: k1 ^= ((uint64_t) key[4]) << 32;
[[fallthrough]];
case 4: k1 ^= ((uint64_t) key[3]) << 24;
[[fallthrough]];
case 3: k1 ^= ((uint64_t) key[2]) << 16;
[[fallthrough]];
case 2: k1 ^= ((uint64_t) key[1]) << 8;
[[fallthrough]];
case 1: k1 ^= ((uint64_t) key[0]);
k1 *= c1; k1 = rotl64(k1,31); k1 *= c2; h1 ^= k1;
};

View File

@@ -100,20 +100,34 @@ void hash3_x64_128(InputIterator in, uint32_t length, uint64_t seed, std::array<
switch (length & 15)
{
case 15: k2 ^= ((uint64_t) tmp[14]) << 48;
[[fallthrough]];
case 14: k2 ^= ((uint64_t) tmp[13]) << 40;
[[fallthrough]];
case 13: k2 ^= ((uint64_t) tmp[12]) << 32;
[[fallthrough]];
case 12: k2 ^= ((uint64_t) tmp[11]) << 24;
[[fallthrough]];
case 11: k2 ^= ((uint64_t) tmp[10]) << 16;
[[fallthrough]];
case 10: k2 ^= ((uint64_t) tmp[9]) << 8;
[[fallthrough]];
case 9: k2 ^= ((uint64_t) tmp[8]) << 0;
k2 *= c2; k2 = rotl64(k2,33); k2 *= c1; h2 ^= k2;
[[fallthrough]];
case 8: k1 ^= ((uint64_t) tmp[7]) << 56;
[[fallthrough]];
case 7: k1 ^= ((uint64_t) tmp[6]) << 48;
[[fallthrough]];
case 6: k1 ^= ((uint64_t) tmp[5]) << 40;
[[fallthrough]];
case 5: k1 ^= ((uint64_t) tmp[4]) << 32;
[[fallthrough]];
case 4: k1 ^= ((uint64_t) tmp[3]) << 24;
[[fallthrough]];
case 3: k1 ^= ((uint64_t) tmp[2]) << 16;
[[fallthrough]];
case 2: k1 ^= ((uint64_t) tmp[1]) << 8;
[[fallthrough]];
case 1: k1 ^= ((uint64_t) tmp[0]);
k1 *= c1; k1 = rotl64(k1,31); k1 *= c2; h1 ^= k1;
};