Mirror of https://github.com/scylladb/scylladb.git, synced 2026-05-12 19:02:12 +00:00.
s3_client: improve exception handling for chunked downloads
Refactor the wrapping exception used in `chunked_download_source` to
prevent the retry strategy from reattempting failed requests. The new
implementation preserves the original `exception_ptr`, making the root
cause clearer and easier to diagnose.
(cherry picked from commit 1d34657b14)
This commit is contained in: (branch list not captured by the mirror)
Committed by: GitHub Action
Parent commit: f9bc211966
This commit: 94d49da8ec
@@ -758,10 +758,9 @@ void test_chunked_download_data_source(const client_maker_function& client_maker
             }
         }
     };
-    BOOST_REQUIRE_EXCEPTION(
-        reader(), storage_io_error, [](const storage_io_error& e) {
-            return e.what() == "S3 request failed. Code: 16. Reason: "sv;
-        });
+    BOOST_REQUIRE_EXCEPTION(reader(), aws::aws_exception, [](const aws::aws_exception& e) {
+        return e.what() == "Injected ResourceNotFound"sv;
+    });
 #else
     testlog.info("Skipping error injection test, as it requires SCYLLA_ENABLE_ERROR_INJECTION to be enabled");
 #endif
@@ -1236,7 +1236,7 @@ class client::chunked_download_source final : public seastar::data_source_impl {
         while (_buffers_size < _max_buffers_size && !_is_finished) {
             utils::get_local_injector().inject("kill_s3_inflight_req", [] {
                 // Inject non-retryable error to emulate source failure
-                throw aws::aws_exception(aws::aws_error::get_errors().at("ResourceNotFound"));
+                throw aws::aws_exception(aws::aws_error(aws::aws_error_type::RESOURCE_NOT_FOUND, "Injected ResourceNotFound", aws::retryable::no));
             });
             s3l.trace("Fiber for object '{}' will try to read within range {}", _object_name, _range);
             temporary_buffer<char> buf;
@@ -1280,18 +1280,21 @@ class client::chunked_download_source final : public seastar::data_source_impl {
                 co_await in.close();
                 if (ex) {
                     auto aws_ex = aws::aws_error::from_exception_ptr(ex);
-                    if (aws_ex.is_retryable()) {
-                        s3l.debug("Fiber for object '{}' rethrowing filler aws_exception {}", _object_name, ex);
-                        throw filler_exception(format("{}", ex).c_str());
-                    }
-                    std::rethrow_exception(ex);
+                    s3l.debug("Fiber for object '{}' rethrowing filler aws_exception {}", _object_name, ex);
+                    throw filler_exception(
+                        ex, aws_ex.is_retryable() == aws::retryable::no && aws_ex.get_error_type() != aws::aws_error_type::EXPIRED_TOKEN);
                 }
             },
             {},
             _as);
         _is_contiguous_mode = _buffers_size < _max_buffers_size * _buffers_high_watermark;
     } catch (const filler_exception& ex) {
-        s3l.warn("Fiber for object '{}' experienced an error in buffer filling loop. Reason: {}. Re-issuing the request", _object_name, ex);
+        if (ex._should_abort) {
+            s3l.warn("Fiber for object '{}' experienced a non-retryable error in buffer filling loop. Reason: {}. Exiting", _object_name, ex._original_exception);
+            _get_cv.broken(ex._original_exception);
+            co_return;
+        }
+        s3l.warn("Fiber for object '{}' experienced an error in buffer filling loop. Reason: {}. Re-issuing the request", _object_name, ex._original_exception);
     } catch (...) {
         s3l.trace("Fiber for object '{}' failed: {}, exiting", _object_name, std::current_exception());
         _get_cv.broken(std::current_exception());
@@ -90,8 +90,10 @@ struct stats {
     std::time_t last_modified;
 };

-struct filler_exception final : std::runtime_error {
-    explicit filler_exception(const char* msg) : std::runtime_error(msg) {}
+struct filler_exception final : std::exception {
+    filler_exception(std::exception_ptr original_exception, bool should_abort) : _original_exception(std::move(original_exception)), _should_abort(should_abort) {}
+    std::exception_ptr _original_exception;
+    bool _should_abort{false};
 };

 future<> ignore_reply(const http::reply& rep, input_stream<char>&& in_);
Reference in New Issue
Block a user