alternator: Limit attribute name lengths
Attribute names are now checked against DynamoDB-compatible length limits. When exceeded, Alternator emits an exception identical or similar to the DDB one. It might be worth noting that DDB emits more than a single kind of an exception string for some exceptions. The tests' catch clauses handle all the observed kinds of messages from DynamoDB. The validation differentiates between key and non-key attributes and applies the limit accordingly. AWS DDB raises exceptions with somewhat different contents when the get request contains ProjectionExpression, so this case needed separate treatment to emit the corresponding exception string. The length-validating function was declared and defined in expressions.hh/.cc respectively, because that's where the relevant parsing happens. ** Tests The following tests were validated when handling this issue: test_limit_attribute_length_nonkey_good, test_limit_attribute_length_nonkey_bad, test_limit_attribute_length_key_good, test_limit_attribute_length_key_bad, test_limit_attribute_length_gsi_lsi_good, test_limit_attribute_length_gsi_lsi_bad, test_limit_attribute_length_gsi_lsi_projection_bad. Some of the tests were expanded into being more granular. Namely, there is a new test function `test_limit_attribute_length_key_bad_incoherent_names` which groups tests with too long attribute names in the case of incorrect (incoherent) user requests. Similarly, there is a new test function `test_limit_attribute_length_gsi_lsi_bad_incoherent_names`. All the tests now cover each combination of the key/keys being too long. Both the new functions contain tests that verify that ScyllaDB throws length-related exceptions (instead of the coherency-related ones), similar to what DynamoDB does. The new test test_limit_gsiu_key_len_bad covers the case of a too long attribute name inside GlobalSecondaryIndexUpdates. 
The new test test_limit_gsiu_key_len_bad_incoherent_names covers the case of incorrect (incoherent) user requests containing too long attribute names and GlobalSecondaryIndexUpdates. test_limit_attribute_length_key_bad was found to have contained an illegal KeySchema structure. Some of the tests had their match clause corrected. All the tests are stripped of the xfail flag except test_limit_attribute_length_key_bad, which has it changed since it still fails due to Projection in GSI and LSI not being implemented in Alternator. The xfail now points to #5036. Fixes scylladb/scylladb#9169 Closes scylladb/scylladb#23097
This commit is contained in:
committed by
Nadav Har'El
parent
82e1678fbe
commit
e588c8667f
@@ -856,7 +856,7 @@ static void add_column(schema_builder& builder, const std::string& name, const r
|
||||
// the HASH key name, and the second one, if exists, must be a RANGE key name.
|
||||
// The function returns the two column names - the first is the hash key
|
||||
// and always present, the second is the range key and may be an empty string.
|
||||
static std::pair<std::string, std::string> parse_key_schema(const rjson::value& obj) {
|
||||
static std::pair<std::string, std::string> parse_key_schema(const rjson::value& obj, std::string_view supplementary_context) {
|
||||
const rjson::value *key_schema;
|
||||
if (!obj.IsObject() || !(key_schema = rjson::find(obj, "KeySchema"))) {
|
||||
throw api_error::validation("Missing KeySchema member");
|
||||
@@ -875,6 +875,7 @@ static std::pair<std::string, std::string> parse_key_schema(const rjson::value&
|
||||
if (!v || !v->IsString()) {
|
||||
throw api_error::validation("First key in KeySchema must have string AttributeName");
|
||||
}
|
||||
validate_attr_name_length(supplementary_context, v->GetStringLength(), true, "HASH key in KeySchema - ");
|
||||
std::string hash_key = v->GetString();
|
||||
std::string range_key;
|
||||
if (key_schema->Size() == 2) {
|
||||
@@ -889,6 +890,7 @@ static std::pair<std::string, std::string> parse_key_schema(const rjson::value&
|
||||
if (!v || !v->IsString()) {
|
||||
throw api_error::validation("Second key in KeySchema must have string AttributeName");
|
||||
}
|
||||
validate_attr_name_length(supplementary_context, v->GetStringLength(), true, "RANGE key in KeySchema - ");
|
||||
range_key = v->GetString();
|
||||
}
|
||||
return {hash_key, range_key};
|
||||
@@ -1155,7 +1157,7 @@ static billing_mode_type verify_billing_mode(const rjson::value& request) {
|
||||
// Return the set of attribute names defined in AttributeDefinitions - this
|
||||
// set is useful for later verifying that all of them are used by some
|
||||
// KeySchema (issue #19784)
|
||||
static std::unordered_set<std::string> validate_attribute_definitions(const rjson::value& attribute_definitions){
|
||||
static std::unordered_set<std::string> validate_attribute_definitions(std::string_view supplementary_context, const rjson::value& attribute_definitions) {
|
||||
if (!attribute_definitions.IsArray()) {
|
||||
throw api_error::validation("AttributeDefinitions must be an array");
|
||||
}
|
||||
@@ -1168,6 +1170,7 @@ static std::unordered_set<std::string> validate_attribute_definitions(const rjso
|
||||
if (!attribute_name->IsString()) {
|
||||
throw api_error::validation("AttributeName in AttributeDefinitions must be a string");
|
||||
}
|
||||
validate_attr_name_length(supplementary_context, attribute_name->GetStringLength(), true, "in AttributeDefinitions - ");
|
||||
auto [it2, added] = seen_attribute_names.emplace(rjson::to_string_view(*attribute_name));
|
||||
if (!added) {
|
||||
throw api_error::validation(fmt::format("Duplicate AttributeName={} in AttributeDefinitions",
|
||||
@@ -1288,12 +1291,12 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
// any of its GSIs or LSIs. If anything remains in this set at the end of
|
||||
// this function, it's an error.
|
||||
std::unordered_set<std::string> unused_attribute_definitions =
|
||||
validate_attribute_definitions(*attribute_definitions);
|
||||
validate_attribute_definitions("", *attribute_definitions);
|
||||
|
||||
tracing::add_table_name(trace_state, keyspace_name, table_name);
|
||||
|
||||
schema_builder builder(keyspace_name, table_name);
|
||||
auto [hash_key, range_key] = parse_key_schema(request);
|
||||
auto [hash_key, range_key] = parse_key_schema(request, "");
|
||||
add_column(builder, hash_key, *attribute_definitions, column_kind::partition_key);
|
||||
unused_attribute_definitions.erase(hash_key);
|
||||
if (!range_key.empty()) {
|
||||
@@ -1339,7 +1342,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
// FIXME: read and handle "Projection" parameter. This will
|
||||
// require the MV code to copy just parts of the attrs map.
|
||||
schema_builder view_builder(keyspace_name, vname);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(l);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(l, "Local Secondary Index");
|
||||
if (view_hash_key != hash_key) {
|
||||
co_return api_error::validation("LocalSecondaryIndex hash key must match the base table hash key");
|
||||
}
|
||||
@@ -1396,7 +1399,7 @@ static future<executor::request_return_type> create_table_on_shard0(service::cli
|
||||
// FIXME: read and handle "Projection" parameter. This will
|
||||
// require the MV code to copy just parts of the attrs map.
|
||||
schema_builder view_builder(keyspace_name, vname);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(g);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(g, "GlobalSecondaryIndexes");
|
||||
|
||||
// If an attribute is already a real column in the base table
|
||||
// (i.e., a key attribute) or we already made it a real column
|
||||
@@ -1703,7 +1706,7 @@ future<executor::request_return_type> executor::update_table(client_state& clien
|
||||
co_return api_error::validation("GlobalSecondaryIndexUpdates Create needs AttributeDefinitions");
|
||||
}
|
||||
std::unordered_set<std::string> unused_attribute_definitions =
|
||||
validate_attribute_definitions(*attribute_definitions);
|
||||
validate_attribute_definitions("GlobalSecondaryIndexUpdates", *attribute_definitions);
|
||||
check_attribute_definitions_conflicts(*attribute_definitions, *schema);
|
||||
for (auto& view : p.local().data_dictionary().find_column_family(tab).views()) {
|
||||
check_attribute_definitions_conflicts(*attribute_definitions, *view);
|
||||
@@ -1723,7 +1726,7 @@ future<executor::request_return_type> executor::update_table(client_state& clien
|
||||
// FIXME: read and handle "Projection" parameter. This will
|
||||
// require the MV code to copy just parts of the attrs map.
|
||||
schema_builder view_builder(keyspace_name, vname);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(it->value);
|
||||
auto [view_hash_key, view_range_key] = parse_key_schema(it->value, "GlobalSecondaryIndexUpdates");
|
||||
// If an attribute is already a real column in the base
|
||||
// table (i.e., a key attribute in the base table or LSI),
|
||||
// we can use it directly as a view key. Otherwise, we
|
||||
@@ -2049,6 +2052,7 @@ put_or_delete_item::put_or_delete_item(const rjson::value& item, schema_ptr sche
|
||||
bytes column_name = to_bytes(it->name.GetString());
|
||||
validate_value(it->value, "PutItem");
|
||||
const column_definition* cdef = find_attribute(*schema, column_name);
|
||||
validate_attr_name_length("", column_name.size(), cdef && cdef->is_primary_key());
|
||||
_length_in_bytes += column_name.size();
|
||||
if (!cdef) {
|
||||
// This attribute may be a key column of one of the GSI, in which
|
||||
@@ -3087,6 +3091,7 @@ static std::optional<attrs_to_get> calculate_attrs_to_get(const rjson::value& re
|
||||
attrs_to_get ret;
|
||||
for (auto it = attributes_to_get.Begin(); it != attributes_to_get.End(); ++it) {
|
||||
attribute_path_map_add("AttributesToGet", ret, it->GetString());
|
||||
validate_attr_name_length("AttributesToGet", it->GetStringLength(), false);
|
||||
}
|
||||
if (ret.empty()) {
|
||||
throw api_error::validation("Empty AttributesToGet is not allowed. Consider using Select=COUNT instead.");
|
||||
@@ -3394,6 +3399,9 @@ update_item_operation::update_item_operation(service::storage_proxy& proxy, rjso
|
||||
if (!_attribute_updates->IsObject()) {
|
||||
throw api_error::validation("AttributeUpdates must be an object");
|
||||
}
|
||||
for (auto it = std::as_const(*_attribute_updates).MemberBegin(); it != std::as_const(*_attribute_updates).MemberEnd(); ++it) {
|
||||
validate_attr_name_length("AttributeUpdates", it->name.GetStringLength(), false);
|
||||
}
|
||||
}
|
||||
|
||||
_condition_expression = get_parsed_condition_expression(_request);
|
||||
|
||||
@@ -165,7 +165,9 @@ static std::optional<std::string> resolve_path_component(const std::string& colu
|
||||
fmt::format("ExpressionAttributeNames missing entry '{}' required by expression", column_name));
|
||||
}
|
||||
used_attribute_names.emplace(column_name);
|
||||
return std::string(rjson::to_string_view(*value));
|
||||
auto result = std::string(rjson::to_string_view(*value));
|
||||
validate_attr_name_length("", result.size(), false, "ExpressionAttributeNames contains invalid value: ");
|
||||
return result;
|
||||
}
|
||||
return std::nullopt;
|
||||
}
|
||||
@@ -737,6 +739,26 @@ rjson::value calculate_value(const parsed::set_rhs& rhs,
|
||||
return rjson::null_value();
|
||||
}
|
||||
|
||||
void validate_attr_name_length(std::string_view supplementary_context, size_t attr_name_length, bool is_key, std::string_view error_msg_prefix) {
|
||||
constexpr const size_t DYNAMODB_KEY_ATTR_NAME_SIZE_MAX = 255;
|
||||
constexpr const size_t DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX = 65535;
|
||||
|
||||
const size_t max_length = is_key ? DYNAMODB_KEY_ATTR_NAME_SIZE_MAX : DYNAMODB_NONKEY_ATTR_NAME_SIZE_MAX;
|
||||
if (attr_name_length > max_length) {
|
||||
std::string error_msg;
|
||||
if (!error_msg_prefix.empty()) {
|
||||
error_msg += error_msg_prefix;
|
||||
}
|
||||
if (!supplementary_context.empty()) {
|
||||
error_msg += "in ";
|
||||
error_msg += supplementary_context;
|
||||
error_msg += " - ";
|
||||
}
|
||||
error_msg += fmt::format("Attribute name is too large, must be less than {} bytes", std::to_string(max_length + 1));
|
||||
throw api_error::validation(error_msg);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace alternator
|
||||
|
||||
auto fmt::formatter<alternator::parsed::path>::format(const alternator::parsed::path& p, fmt::format_context& ctx) const
|
||||
|
||||
@@ -91,5 +91,7 @@ rjson::value calculate_value(const parsed::value& v,
|
||||
rjson::value calculate_value(const parsed::set_rhs& rhs,
|
||||
const rjson::value* previous_item);
|
||||
|
||||
void validate_attr_name_length(std::string_view supplementary_context, size_t attr_name_length, bool is_key, std::string_view error_msg_prefix = {});
|
||||
|
||||
|
||||
} /* namespace alternator */
|
||||
|
||||
@@ -62,24 +62,23 @@ def test_limit_attribute_length_nonkey_good(test_table_s):
|
||||
# documentation suggests, the length 64KB itself is not allowed - 65535
|
||||
# (which we tested above) is the last accepted size.
|
||||
# Reproduces issue #9169.
|
||||
@pytest.mark.xfail(reason="issue #9169: attribute name limits not enforced")
|
||||
def test_limit_attribute_length_nonkey_bad(test_table_s):
|
||||
p = random_string()
|
||||
too_long_name = random_string(64)*1024
|
||||
with pytest.raises(ClientError, match='ValidationException.*Attribute name'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*6553[56]'):
|
||||
test_table_s.put_item(Item={'p': p, too_long_name: 1})
|
||||
with pytest.raises(ClientError, match='ValidationException.*Attribute name'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*6553[56]'):
|
||||
test_table_s.get_item(Key={'p': p}, ProjectionExpression='#name',
|
||||
ExpressionAttributeNames={'#name': too_long_name})
|
||||
with pytest.raises(ClientError, match='ValidationException.*Attribute name'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*6553[56]'):
|
||||
test_table_s.get_item(Key={'p': p}, AttributesToGet=[too_long_name])
|
||||
with pytest.raises(ClientError, match='ValidationException.*Attribute name'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*6553[56]'):
|
||||
test_table_s.update_item(Key={'p': p}, AttributeUpdates={too_long_name: {'Value': 2, 'Action': 'PUT'}})
|
||||
with pytest.raises(ClientError, match='ValidationException.*Attribute name'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*6553[56]'):
|
||||
test_table_s.update_item(Key={'p': p}, UpdateExpression='SET #name = :val',
|
||||
ExpressionAttributeNames={'#name': too_long_name},
|
||||
ExpressionAttributeValues={':val': 3})
|
||||
with pytest.raises(ClientError, match='ValidationException.*Attribute name'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*6553[56]'):
|
||||
test_table_s.update_item(Key={'p': p}, UpdateExpression='SET a = :val',
|
||||
ConditionExpression='#name = :val',
|
||||
ExpressionAttributeNames={'#name': too_long_name},
|
||||
@@ -106,18 +105,61 @@ def test_limit_attribute_length_key_good(dynamodb):
|
||||
# documentation - which only mentions that SI keys are limited to 255 bytes,
|
||||
# but forgets to mention base-table keys.
|
||||
# Reproduces issue #9169.
|
||||
@pytest.mark.xfail(reason="issue #9169: attribute name limits not enforced")
|
||||
def test_limit_attribute_length_key_bad(dynamodb):
|
||||
too_long_name = random_string(256)
|
||||
with pytest.raises(ClientError, match='ValidationException.*length'):
|
||||
|
||||
# Test with HASH-type or non-RANGE-type key name too long
|
||||
with pytest.raises(ClientError, match='ValidationException.*25[56]'):
|
||||
with new_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': too_long_name, 'KeyType': 'HASH' } ],
|
||||
AttributeDefinitions=[ { 'AttributeName': too_long_name, 'AttributeType': 'S' } ]) as table:
|
||||
pass
|
||||
with pytest.raises(ClientError, match='ValidationException.*length'):
|
||||
|
||||
# Test with RANGE-type (or non-HASH-type key) name too long
|
||||
with pytest.raises(ClientError, match='ValidationException.*25[56]'):
|
||||
with new_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': 'x', 'KeyType': 'HASH',
|
||||
'AttributeName': too_long_name, 'KeyType': 'RANGE' }, ],
|
||||
KeySchema=[ { 'AttributeName': 'x', 'KeyType': 'HASH', },
|
||||
{ 'AttributeName': too_long_name, 'KeyType': 'RANGE' } ],
|
||||
AttributeDefinitions=[ { 'AttributeName': too_long_name, 'AttributeType': 'S' },
|
||||
{ 'AttributeName': 'x', 'AttributeType': 'S' } ]) as table:
|
||||
pass
|
||||
|
||||
# Test that *key* attribute names more than 255 characters are not allowed,
|
||||
# not for hash key and not for range key. Strangely, this limitation is not
|
||||
# explicitly mentioned in the DynamoDB documentation.
|
||||
# The tests here cover the cases of too long names in incorrect (incoherent)
|
||||
# user requests. The incoherence here means that AttributeDefinitions should
|
||||
# refer to AttributeName value that is used inside KeySchema as well.
|
||||
# In all the cases, DynamoDB returns attribute name length-related errors,
|
||||
# ignoring the issues with the consistency.
|
||||
# Reproduces issue #9169.
|
||||
def test_limit_attribute_length_key_bad_incoherent_names(dynamodb):
    # 256 bytes is one past the 255-byte limit on key attribute names.
    too_long_name = random_string(256)

    # Tests with HASH-type or non-RANGE-type key name too long.
    # Each request is also incoherent (KeySchema and AttributeDefinitions
    # name different attributes), yet the expected error is the
    # length-related one - matching observed DynamoDB behavior.
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': too_long_name, 'KeyType': 'HASH' } ],
            AttributeDefinitions=[ { 'AttributeName': "incoherent-short-name", 'AttributeType': 'S' } ]) as table:
            pass
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': "incoherent-short-name", 'KeyType': 'HASH' } ],
            AttributeDefinitions=[ { 'AttributeName': too_long_name, 'AttributeType': 'S' } ]) as table:
            pass

    # Tests with RANGE-type (or non-HASH-type key) name too long.
    # Same expectation: the length error wins over the coherency error.
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'x', 'KeyType': 'HASH', },
                        { 'AttributeName': too_long_name, 'KeyType': 'RANGE' } ],
            AttributeDefinitions=[ { 'AttributeName': "incoherent-short-name", 'AttributeType': 'S' },
                                   { 'AttributeName': 'x', 'AttributeType': 'S' } ]) as table:
            pass
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'x', 'KeyType': 'HASH', },
                        { 'AttributeName': "incoherent-short-name", 'KeyType': 'RANGE' } ],
            AttributeDefinitions=[ { 'AttributeName': too_long_name, 'AttributeType': 'S' },
                                   { 'AttributeName': 'x', 'AttributeType': 'S' } ]) as table:
            pass
|
||||
@@ -169,10 +211,11 @@ def test_limit_attribute_length_gsi_lsi_good(dynamodb):
|
||||
})
|
||||
|
||||
# Reproduces issue #9169.
|
||||
@pytest.mark.xfail(reason="issue #9169: attribute name limits not enforced")
|
||||
def test_limit_attribute_length_gsi_lsi_bad(dynamodb):
|
||||
too_long_name = random_string(256)
|
||||
with pytest.raises(ClientError, match='ValidationException.*length'):
|
||||
|
||||
# GSI attr. name length test
|
||||
with pytest.raises(ClientError, match='ValidationException.*25[56]'):
|
||||
with new_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
|
||||
{ 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
|
||||
@@ -187,7 +230,9 @@ def test_limit_attribute_length_gsi_lsi_bad(dynamodb):
|
||||
}
|
||||
]) as table:
|
||||
pass
|
||||
with pytest.raises(ClientError, match='ValidationException.*length'):
|
||||
|
||||
# LSI attr. name length test
|
||||
with pytest.raises(ClientError, match='ValidationException.*25[56]'):
|
||||
with new_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
|
||||
{ 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
|
||||
@@ -204,6 +249,84 @@ def test_limit_attribute_length_gsi_lsi_bad(dynamodb):
|
||||
]) as table:
|
||||
pass
|
||||
|
||||
# The tests here cover the cases of too long names in incorrect (incoherent)
|
||||
# user requests. The incoherence here means:
|
||||
# 1. GlobalSecondaryIndexes should refer to AttributeName value that is used
|
||||
# inside AttributeDefinitions as well.
|
||||
# 2. LocalSecondaryIndexes should refer to AttributeName value that is used
|
||||
# inside AttributeDefinitions as well.
|
||||
# In all the cases, DynamoDB returns attribute name length-related errors,
|
||||
# ignoring the issues with the consistency.
|
||||
# Reproduces issue #9169.
|
||||
def test_limit_attribute_length_gsi_lsi_bad_incoherent_names(dynamodb):
    # 256 bytes is one past the 255-byte limit on key attribute names.
    too_long_name = random_string(256)

    # GSI attr. name length tests: the over-long name appears once in
    # AttributeDefinitions and once in the GSI's KeySchema, while the other
    # side names an unrelated short attribute (incoherent request). The
    # expected error is still the length-related one, as in DynamoDB.
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
                        { 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
            AttributeDefinitions=[
                { 'AttributeName': 'a', 'AttributeType': 'S' },
                { 'AttributeName': 'b', 'AttributeType': 'S' },
                { 'AttributeName': too_long_name, 'AttributeType': 'S' } ],
            GlobalSecondaryIndexes=[
                { 'IndexName': 'gsi', 'KeySchema': [
                    { 'AttributeName': "incoherent-short-name", 'KeyType': 'HASH' },
                  ], 'Projection': { 'ProjectionType': 'ALL' }
                }
            ]) as table:
            pass
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
                        { 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
            AttributeDefinitions=[
                { 'AttributeName': 'a', 'AttributeType': 'S' },
                { 'AttributeName': 'b', 'AttributeType': 'S' },
                { 'AttributeName': "incoherent-short-name", 'AttributeType': 'S' } ],
            GlobalSecondaryIndexes=[
                { 'IndexName': 'gsi', 'KeySchema': [
                    { 'AttributeName': too_long_name, 'KeyType': 'HASH' },
                  ], 'Projection': { 'ProjectionType': 'ALL' }
                }
            ]) as table:
            pass

    # LSI attr. name length tests: same two incoherent combinations, with
    # the over-long name as the LSI's RANGE key.
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
                        { 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
            AttributeDefinitions=[
                { 'AttributeName': 'a', 'AttributeType': 'S' },
                { 'AttributeName': 'b', 'AttributeType': 'S' },
                { 'AttributeName': too_long_name, 'AttributeType': 'S' } ],
            LocalSecondaryIndexes=[
                { 'IndexName': 'lsi', 'KeySchema': [
                    { 'AttributeName': 'a', 'KeyType': 'HASH' },
                    { 'AttributeName': "incoherent-short-name", 'KeyType': 'RANGE' },
                  ], 'Projection': { 'ProjectionType': 'ALL' }
                }
            ]) as table:
            pass
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
                        { 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
            AttributeDefinitions=[
                { 'AttributeName': 'a', 'AttributeType': 'S' },
                { 'AttributeName': 'b', 'AttributeType': 'S' },
                { 'AttributeName': "incoherent-short-name", 'AttributeType': 'S' } ],
            LocalSecondaryIndexes=[
                { 'IndexName': 'lsi', 'KeySchema': [
                    { 'AttributeName': 'a', 'KeyType': 'HASH' },
                    { 'AttributeName': too_long_name, 'KeyType': 'RANGE' },
                  ], 'Projection': { 'ProjectionType': 'ALL' }
                }
            ]) as table:
            pass
|
||||
|
||||
# Attribute length tests 7,8: In an LSI, projected attribute names are also
|
||||
# limited to 255 bytes. This is explicitly mentioned in the DynamoDB
|
||||
# documentation. For GSI this is also true (but not explicitly mentioned).
|
||||
@@ -211,10 +334,10 @@ def test_limit_attribute_length_gsi_lsi_bad(dynamodb):
|
||||
# attributes projected as part as ALL can be bigger (up to the usual 64KB
|
||||
# limit).
|
||||
# Reproduces issue #9169.
|
||||
@pytest.mark.xfail(reason="issue #9169: attribute name limits not enforced")
|
||||
@pytest.mark.xfail(reason="issue #5036: projection in GSI and LSI not supported")
|
||||
def test_limit_attribute_length_gsi_lsi_projection_bad(dynamodb):
|
||||
too_long_name = random_string(256)
|
||||
with pytest.raises(ClientError, match='ValidationException.*length'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*25[56]'):
|
||||
with new_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
|
||||
{ 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
|
||||
@@ -230,7 +353,7 @@ def test_limit_attribute_length_gsi_lsi_projection_bad(dynamodb):
|
||||
}
|
||||
]) as table:
|
||||
pass
|
||||
with pytest.raises(ClientError, match='ValidationException.*length'):
|
||||
with pytest.raises(ClientError, match='ValidationException.*25[56]'):
|
||||
with new_test_table(dynamodb,
|
||||
KeySchema=[ { 'AttributeName': 'a', 'KeyType': 'HASH' },
|
||||
{ 'AttributeName': 'b', 'KeyType': 'RANGE' } ],
|
||||
@@ -759,3 +882,57 @@ def test_limit_partition_key_len(test_table_s, test_table_b):
|
||||
test_table_b.put_item(Item=key)
|
||||
with pytest.raises(ClientError, match='ValidationException.*limit'):
|
||||
test_table_b.get_item(Key=key, ConsistentRead=True)
|
||||
|
||||
# Test attr name limit of UpdateTable's GlobalSecondaryIndexUpdates.
|
||||
# Reproduces issue #9169.
|
||||
def test_limit_gsiu_key_len_bad(dynamodb):
    # A GSI key attribute name longer than 255 bytes must be rejected when
    # the GSI is created via UpdateTable's GlobalSecondaryIndexUpdates, just
    # like in CreateTable.
    too_long_name = random_string(256)

    # Match fixed to 'ValidationException.*' - the previous
    # 'ValidationException..*' had a stray extra dot, inconsistent with the
    # convention used by the rest of this file.
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'p', 'KeyType': 'HASH' } ],
            AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' } ]) as table:
            # Now use UpdateTable to create the GSI
            dynamodb.meta.client.update_table(TableName=table.name,
                AttributeDefinitions=[{ 'AttributeName': too_long_name, 'AttributeType': 'S' }],
                GlobalSecondaryIndexUpdates=[ { 'Create':
                    { 'IndexName': 'hello',
                      'KeySchema': [{ 'AttributeName': too_long_name, 'KeyType': 'HASH' }],
                      'Projection': { 'ProjectionType': 'ALL' }
                    }}])
||||
|
||||
# Test attr name limit of UpdateTable with GlobalSecondaryIndexUpdates.
|
||||
# The tests here cover the cases of too long names in incorrect (incoherent)
|
||||
# user requests. The incoherence here means that AttributeDefinitions should
|
||||
# refer to AttributeName value that is used inside GlobalSecondaryIndexUpdates
|
||||
as well. In all the cases, DynamoDB returns attribute name length-related
|
||||
# errors, ignoring the issues with the consistency.
|
||||
# Reproduces issue #9169.
|
||||
def test_limit_gsiu_key_len_bad_incoherent_names(dynamodb):
    # 256 bytes is one past the 255-byte limit on key attribute names.
    too_long_name = random_string(256)

    # Matches fixed to 'ValidationException.*' - the previous
    # 'ValidationException..*' had a stray extra dot, inconsistent with the
    # convention used by the rest of this file.
    #
    # Case 1: the over-long name is in AttributeDefinitions, while the GSI's
    # KeySchema names an unrelated short attribute (incoherent request).
    # The length-related error is still expected, as in DynamoDB.
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'p', 'KeyType': 'HASH' } ],
            AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' } ]) as table:
            # Now use UpdateTable to create the GSI
            dynamodb.meta.client.update_table(TableName=table.name,
                AttributeDefinitions=[{ 'AttributeName': too_long_name, 'AttributeType': 'S' }],
                GlobalSecondaryIndexUpdates=[ { 'Create':
                    { 'IndexName': 'hello',
                      'KeySchema': [{ 'AttributeName': "incoherent-short-name", 'KeyType': 'HASH' }],
                      'Projection': { 'ProjectionType': 'ALL' }
                    }}])

    # Case 2: the over-long name is in the GSI's KeySchema, while
    # AttributeDefinitions names an unrelated short attribute.
    with pytest.raises(ClientError, match='ValidationException.*25[56]'):
        with new_test_table(dynamodb,
            KeySchema=[ { 'AttributeName': 'p', 'KeyType': 'HASH' } ],
            AttributeDefinitions=[ { 'AttributeName': 'p', 'AttributeType': 'S' } ]) as table:
            # Now use UpdateTable to create the GSI
            dynamodb.meta.client.update_table(TableName=table.name,
                AttributeDefinitions=[{ 'AttributeName': "incoherent-short-name", 'AttributeType': 'S' }],
                GlobalSecondaryIndexUpdates=[ { 'Create':
                    { 'IndexName': 'hello',
                      'KeySchema': [{ 'AttributeName': too_long_name, 'KeyType': 'HASH' }],
                      'Projection': { 'ProjectionType': 'ALL' }
                    }}])
|
||||
|
||||
Reference in New Issue
Block a user