Compare commits

...

8 Commits

Author SHA1 Message Date
Calle Wilund
1d449b47e1 commit log reader bugfix: Fix attempt to read entries across chunk bounds
read_entry did not verify that current chunk has enough data left
for a minimal entry. Thus we could try to read an entry from the slack
left in a chunk, get lost in the file (pos > next, skip very much
-> eof), and also give false errors about corruption.
Message-Id: <1452517700-599-1-git-send-email-calle@scylladb.com>

(cherry picked from commit 7f4985a017)
2016-01-12 10:29:31 +02:00
Avi Kivity
b7d5fbe967 db: reduce log spam when ignoring an sstable
With 10 sstables/shard and 50 shards, we get ~10*50*50 messages = 25,000
log messages about sstables being ignored.  This is not reasonable.

Reduce the log level to debug, and move the message to database.cc,
because at its original location, the containing function has nothing to
do with the message itself.

Reviewed-by: Raphael S. Carvalho <raphaelsc@cloudius-systems.com>
Message-Id: <1452181687-7665-1-git-send-email-avi@scylladb.com>
2016-01-07 19:26:18 +02:00
Vlad Zolotarov
149aea32e7 database: filter out manifest.json files
Filter out manifest.json files when reading sstables during
bootup and when loading new sstables ('nodetool refresh').

Fixes issue #529

Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
Message-Id: <1451911734-26511-3-git-send-email-vladz@cloudius-systems.com>
2016-01-07 18:00:09 +02:00
Vlad Zolotarov
97c796b26b database: lister: add a filtering option
Add the ability to pass a filter functor that receives the full path
of a directory entry and returns a boolean value: TRUE if the
entry should be enumerated and FALSE if it should be filtered out.

Signed-off-by: Vlad Zolotarov <vladz@cloudius-systems.com>
Message-Id: <1451911734-26511-2-git-send-email-vladz@cloudius-systems.com>
2016-01-07 18:00:03 +02:00
Avi Kivity
1545fc505b main: wait for API http server to start
Wait for the future returned by the http server start process to resolve,
so we know it is started.  If it doesn't, we'll hit the or_terminate()
further down the line and exit with an error code.
Message-Id: <1452092806-11508-3-git-send-email-avi@scylladb.com>
2016-01-07 16:50:27 +02:00
Avi Kivity
b690eaef38 snitch: intentionally leak snitch singleton
Because our shutdown process is crippled (refs #293), we won't shutdown the
snitch correctly, and the sharded<> instance can assert during shutdown.
This interferes with the next patch, which adds orderly shutdown if the http
server fails to start.

Leak it intentionally to work around the problem.
Message-Id: <1452092806-11508-2-git-send-email-avi@scylladb.com>
2016-01-07 16:50:22 +02:00
Benoît Canet
7b5df973fa config: Mark ssl_storage_port as Used
Signed-off-by: Benoît Canet <benoit@scylladb.com>
Message-Id: <1452082041-6117-1-git-send-email-benoit@scylladb.com>
2016-01-06 20:20:59 +02:00
Pekka Enberg
c33815211f release: prepare for 0.15 2016-01-06 12:16:58 +02:00
8 changed files with 59 additions and 16 deletions

View File

@@ -1,6 +1,6 @@
#!/bin/sh
VERSION=development
VERSION=0.15
if test -f version
then

View File

@@ -358,23 +358,32 @@ column_family::for_all_partitions_slow(std::function<bool (const dht::decorated_
class lister {
public:
using dir_entry_types = std::unordered_set<directory_entry_type, enum_hash<directory_entry_type>>;
using walker_type = std::function<future<> (directory_entry)>;
using filter_type = std::function<bool (const sstring&)>;
private:
file _f;
std::function<future<> (directory_entry de)> _walker;
walker_type _walker;
filter_type _filter;
dir_entry_types _expected_type;
subscription<directory_entry> _listing;
sstring _dirname;
public:
lister(file f, dir_entry_types type, std::function<future<> (directory_entry)> walker, sstring dirname)
lister(file f, dir_entry_types type, walker_type walker, sstring dirname)
: _f(std::move(f))
, _walker(std::move(walker))
, _filter([] (const sstring& fname) { return true; })
, _expected_type(type)
, _listing(_f.list_directory([this] (directory_entry de) { return _visit(de); }))
, _dirname(dirname) {
}
static future<> scan_dir(sstring name, dir_entry_types type, std::function<future<> (directory_entry)> walker);
lister(file f, dir_entry_types type, walker_type walker, filter_type filter, sstring dirname)
: lister(std::move(f), type, std::move(walker), dirname) {
_filter = std::move(filter);
}
static future<> scan_dir(sstring name, dir_entry_types type, walker_type walker, filter_type filter = [] (const sstring& fname) { return true; });
protected:
future<> _visit(directory_entry de) {
@@ -383,6 +392,12 @@ protected:
if ((!_expected_type.count(*(de.type))) || (de.name[0] == '.')) {
return make_ready_future<>();
}
// apply a filter
if (!_filter(_dirname + "/" + de.name)) {
return make_ready_future<>();
}
return _walker(de);
});
@@ -403,9 +418,9 @@ private:
};
future<> lister::scan_dir(sstring name, lister::dir_entry_types type, std::function<future<> (directory_entry)> walker) {
return engine().open_directory(name).then([type, walker = std::move(walker), name] (file f) {
auto l = make_lw_shared<lister>(std::move(f), type, walker, name);
future<> lister::scan_dir(sstring name, lister::dir_entry_types type, walker_type walker, filter_type filter) {
return engine().open_directory(name).then([type, walker = std::move(walker), filter = std::move(filter), name] (file f) {
auto l = make_lw_shared<lister>(std::move(f), type, walker, filter, name);
return l->done().then([l] { });
});
}
@@ -453,6 +468,9 @@ future<sstables::entry_descriptor> column_family::probe_file(sstring sstdir, sst
return std::move(fut).then([this, sstdir = std::move(sstdir), comps] (range<partition_key> r) {
// Checks whether or not sstable belongs to current shard.
if (!belongs_to_current_shard(*_schema, std::move(r))) {
dblog.debug("sstable {} not relevant for this shard, ignoring",
sstables::sstable::filename(sstdir, _schema->ks_name(), _schema->cf_name(), comps.version, comps.generation, comps.format,
sstables::sstable::component_type::Data));
sstable::mark_sstable_for_deletion(_schema->ks_name(), _schema->cf_name(), sstdir, comps.generation, comps.version, comps.format);
return make_ready_future<>();
}
@@ -672,7 +690,7 @@ column_family::reshuffle_sstables(int64_t start) {
// Those SSTables are not known by anyone in the system. So we don't have any kind of
// object describing them. There isn't too much of a choice.
return work.sstables[comps.generation]->read_toc();
}).then([&work] {
}, &manifest_json_filter).then([&work] {
// Note: cannot be parallel because we will be shuffling things around at this stage. Can't race.
return do_for_each(work.sstables, [&work] (auto& pair) {
auto&& comps = std::move(work.descriptors.at(pair.first));
@@ -838,6 +856,17 @@ lw_shared_ptr<sstable_list> column_family::get_sstables() {
return _sstables;
}
// Directory-scan filter: returns false for a regular file named
// "manifest.json" so it is skipped while populating sstables from a
// directory (fixes issue #529); returns true for everything else.
// @param fname full path of the directory entry being considered.
inline bool column_family::manifest_json_filter(const sstring& fname) {
using namespace boost::filesystem;
path entry_path(fname);
// Only a non-directory entry is filtered; a directory that happens to
// be named manifest.json would still be enumerated.
if (!is_directory(status(entry_path)) && entry_path.filename() == path("manifest.json")) {
return false;
}
return true;
}
future<> column_family::populate(sstring sstdir) {
// We can catch most errors when we try to load an sstable. But if the TOC
// file is the one missing, we won't try to load the sstable at all. This
@@ -899,7 +928,7 @@ future<> column_family::populate(sstring sstdir) {
futures.push_back(std::move(f));
return make_ready_future<>();
}).then([&futures] {
}, &manifest_json_filter).then([&futures] {
return when_all(futures.begin(), futures.end()).then([] (std::vector<future<>> ret) {
try {
for (auto& f : ret) {
@@ -919,7 +948,7 @@ future<> column_family::populate(sstring sstdir) {
sstables::sstable::format_types format = descriptor->format.value();
if (engine().cpu_id() != 0) {
dblog.info("At directory: {}, partial SSTable with generation {} not relevant for this shard, ignoring", sstdir, v.first);
dblog.debug("At directory: {}, partial SSTable with generation {} not relevant for this shard, ignoring", sstdir, v.first);
return make_ready_future<>();
}
// shard 0 is the responsible for removing a partial sstable.

View File

@@ -351,6 +351,9 @@ private:
// one are also complete
future<> seal_active_memtable();
// filter manifest.json files out
static bool manifest_json_filter(const sstring& fname);
seastar::gate _in_flight_seals;
// Iterate over all partitions. Protocol is the same as std::all_of(),

View File

@@ -1350,6 +1350,17 @@ db::commitlog::read_log_file(file f, commit_load_reader_func next, position_type
}
future<> read_entry() {
static constexpr size_t entry_header_size = segment::entry_overhead_size - sizeof(uint32_t);
/**
* #598 - Must check that data left in chunk is enough to even read an entry.
* If not, this is small slack space in the chunk end, and we should just go
* to the next.
*/
assert(pos <= next);
if ((pos + entry_header_size) >= next) {
return skip(next - pos);
}
return fin.read_exactly(entry_header_size).then([this](temporary_buffer<char> buf) {
replay_position rp(id, position_type(pos));

View File

@@ -687,7 +687,7 @@ public:
"\tkeyfile: (Default: conf/scylla.key) PEM Key file associated with certificate.\n" \
"Related information: Client-to-node encryption" \
) \
val(ssl_storage_port, uint32_t, 7001, Unused, \
val(ssl_storage_port, uint32_t, 7001, Used, \
"The SSL port for encrypted communication. Unused unless enabled in encryption_options." \
) \
val(default_log_level, sstring, "warn", Used, \

View File

@@ -149,9 +149,10 @@ public:
virtual void set_local_private_addr(const sstring& addr_str) {};
static distributed<snitch_ptr>& snitch_instance() {
static distributed<snitch_ptr> snitch_inst;
// FIXME: leaked intentionally to avoid shutdown problems, see #293
static distributed<snitch_ptr>* snitch_inst = new distributed<snitch_ptr>();
return snitch_inst;
return *snitch_inst;
}
static snitch_ptr& get_local_snitch_ptr() {

View File

@@ -299,10 +299,10 @@ int main(int ac, char** av) {
return dns::gethostbyname(api_address);
}).then([&db, api_address, api_port, &ctx] (dns::hostent e){
auto ip = e.addresses[0].in.s_addr;
ctx.http_server.start().then([api_address, api_port, ip, &ctx] {
return ctx.http_server.start().then([api_address, api_port, ip, &ctx] {
return set_server(ctx);
}).then([api_address, api_port, ip, &ctx] {
ctx.http_server.listen(ipv4_addr{ip, api_port});
return ctx.http_server.listen(ipv4_addr{ip, api_port});
}).then([api_address, api_port] {
print("Seastar HTTP server listening on %s:%s ...\n", api_address, api_port);
});

View File

@@ -1755,7 +1755,6 @@ sstable::get_sstable_key_range(const schema& s, sstring ks, sstring cf, sstring
void sstable::mark_sstable_for_deletion(sstring ks, sstring cf, sstring dir, int64_t generation, version_types v, format_types f) {
auto sst = sstable(ks, cf, dir, generation, v, f);
sstlog.info("sstable {} not relevant for this shard, ignoring", sst.get_filename());
sst.mark_for_deletion();
}