Compare commits


1 Commits

Author: Ben McClelland
SHA1: b898c89c11
Message: add local,ipmi,powerman fenced scripts to utils rpm

This adds the fenced scripts so that we have a place to track them and
get updates out to users. This latest version of the scripts includes
checks that validate the power off actually succeeded, rather than just
assuming success from the power command's return status.

Signed-off-by: Ben McClelland <ben.mcclelland@versity.com>
Date: 2022-07-06 15:11:21 -07:00
20 changed files with 436 additions and 305 deletions
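The power-off validation the commit message describes boils down to issuing the power-off and then polling the chassis status, rather than trusting the power command's exit code. A minimal sketch of that pattern, assuming freeipmi's ipmipower and a hypothetical BMC address (the ipmi-remote-host script below wraps the same idea in its powerOffHost function):

#!/usr/bin/bash
# sketch only: power the node off via its BMC, then confirm it actually reports "off"
bmc="bmc.example.com"                       # hypothetical BMC address
opts="-D LAN_2_0 -u admin -p password"      # matches the example scoutfs-ipmi.conf below
ipmipower $opts -h "$bmc" --wait-until-off --off || exit 1
stat=$(ipmipower $opts -h "$bmc" --stat) || exit 1
[[ "$stat" =~ off ]] || { echo "power off not confirmed" >&2; exit 1; }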

View File

@@ -1,22 +1,6 @@
Versity ScoutFS Release Notes
=============================
---
v1.6
\
*Jul 7, 2022*
* **Fix memory leaks in rare corner cases**
\
Analysis tools found a few corner cases that leaked small structures,
generally around error handling or startup and shutdown.
* **Add --skip-likely-huge scoutfs print command option**
\
Add an option to scoutfs print to reduce the size of the output
so that it can be used to see system-wide metadata without being
overwhelmed by file-level details.
---
v1.5
\

View File

@@ -46,10 +46,6 @@ scoutfs-y += \
volopt.o \
xattr.o
ifdef KC_BUILD_KERNELCOMPAT
scoutfs-y += kernelcompat.o
endif
#
# The raw types aren't available in userspace headers. Make sure all
# the types we use in the headers are the exported __ versions.

View File

@@ -34,52 +34,3 @@ endif
ifneq (,$(shell grep 'FMODE_KABI_ITERATE' include/linux/fs.h))
ccflags-y += -DKC_FMODE_KABI_ITERATE
endif
#
# v4.11-12447-g104b4e5139fe
#
# Renamed __percpu_counter_add to percpu_counter_add_batch to clarify
# that the __ wasn't less safe, just took an extra parameter.
#
ifneq (,$(shell grep 'percpu_counter_add_batch' include/linux/percpu_counter.h))
ccflags-y += -DKC_PERCPU_COUNTER_ADD_BATCH
endif
#
# v4.11-4550-g7dea19f9ee63
#
# Introduced memalloc_nofs_{save,restore} preferred instead of _noio_.
#
ifneq (,$(shell grep 'memalloc_nofs_save' include/linux/sched/mm.h))
ccflags-y += -DKC_MEMALLOC_NOFS_SAVE
endif
#
# v4.7-12414-g1eff9d322a44
#
# Renamed bi_rw to bi_opf to force old code to catch up. We use it as a
# single switch between old and new bio structures.
#
ifneq (,$(shell grep 'bi_opf' include/linux/blk_types.h))
ccflags-y += -DKC_BIO_BI_OPF
endif
#
# v4.12-rc2-201-g4e4cbee93d56
#
# Moves to bi_status BLK_STS_ API instead of having a mix of error
# end_io args or bi_error.
#
ifneq (,$(shell grep 'bi_status' include/linux/blk_types.h))
ccflags-y += -DKC_BIO_BI_STATUS
endif
#
# v3.11-8765-ga0b02131c5fc
#
# Remove the old ->shrink() API, ->{scan,count}_objects is preferred.
#
ifneq (,$(shell grep '(*shrink)' include/linux/shrinker.h))
ccflags-y += -DKC_SHRINKER_SHRINK
KC_BUILD_KERNELCOMPAT=1
endif

View File

@@ -21,7 +21,6 @@
#include <linux/blkdev.h>
#include <linux/rhashtable.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include "format.h"
#include "super.h"
@@ -58,7 +57,7 @@ struct block_info {
atomic64_t access_counter;
struct rhashtable ht;
wait_queue_head_t waitq;
KC_DEFINE_SHRINKER(shrinker);
struct shrinker shrinker;
struct work_struct free_work;
struct llist_head free_llist;
};
@@ -129,7 +128,7 @@ static __le32 block_calc_crc(struct scoutfs_block_header *hdr, u32 size)
static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
{
struct block_private *bp;
unsigned int nofs_flags;
unsigned int noio_flags;
/*
* If we had multiple blocks per page we'd need to be a little
@@ -157,9 +156,9 @@ static struct block_private *block_alloc(struct super_block *sb, u64 blkno)
* spurious reclaim-on dependencies and warnings.
*/
lockdep_off();
nofs_flags = memalloc_nofs_save();
noio_flags = memalloc_noio_save();
bp->virt = __vmalloc(SCOUTFS_BLOCK_LG_SIZE, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
memalloc_nofs_restore(nofs_flags);
memalloc_noio_restore(noio_flags);
lockdep_on();
if (!bp->virt) {
@@ -437,10 +436,11 @@ static void block_remove_all(struct super_block *sb)
* possible. Final freeing, verifying checksums, and unlinking errored
* blocks are all done by future users of the blocks.
*/
static void block_end_io(struct super_block *sb, unsigned int opf,
static void block_end_io(struct super_block *sb, int rw,
struct block_private *bp, int err)
{
DECLARE_BLOCK_INFO(sb, binf);
bool is_read = !(rw & WRITE);
if (err) {
scoutfs_inc_counter(sb, block_cache_end_io_error);
@@ -450,7 +450,7 @@ static void block_end_io(struct super_block *sb, unsigned int opf,
if (!atomic_dec_and_test(&bp->io_count))
return;
if (!op_is_write(opf) && !test_bit(BLOCK_BIT_ERROR, &bp->bits))
if (is_read && !test_bit(BLOCK_BIT_ERROR, &bp->bits))
set_bit(BLOCK_BIT_UPTODATE, &bp->bits);
clear_bit(BLOCK_BIT_IO_BUSY, &bp->bits);
@@ -463,13 +463,13 @@ static void block_end_io(struct super_block *sb, unsigned int opf,
wake_up(&binf->waitq);
}
static void KC_DECLARE_BIO_END_IO(block_bio_end_io, struct bio *bio)
static void block_bio_end_io(struct bio *bio, int err)
{
struct block_private *bp = bio->bi_private;
struct super_block *sb = bp->sb;
TRACE_BLOCK(end_io, bp);
block_end_io(sb, kc_bio_get_opf(bio), bp, kc_bio_get_errno(bio));
block_end_io(sb, bio->bi_rw, bp, err);
bio_put(bio);
}
@@ -477,7 +477,7 @@ static void KC_DECLARE_BIO_END_IO(block_bio_end_io, struct bio *bio)
* Kick off IO for a single block.
*/
static int block_submit_bio(struct super_block *sb, struct block_private *bp,
unsigned int opf)
int rw)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct bio *bio = NULL;
@@ -510,9 +510,8 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
break;
}
kc_bio_set_opf(bio, opf);
kc_bio_set_sector(bio, sector + (off >> 9));
bio_set_dev(bio, sbi->meta_bdev);
bio->bi_sector = sector + (off >> 9);
bio->bi_bdev = sbi->meta_bdev;
bio->bi_end_io = block_bio_end_io;
bio->bi_private = bp;
@@ -529,18 +528,18 @@ static int block_submit_bio(struct super_block *sb, struct block_private *bp,
BUG();
if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
submit_bio(bio);
submit_bio(rw, bio);
bio = NULL;
}
}
if (bio)
submit_bio(bio);
submit_bio(rw, bio);
blk_finish_plug(&plug);
/* let racing end_io know we're done */
block_end_io(sb, opf, bp, ret);
block_end_io(sb, rw, bp, ret);
return ret;
}
@@ -641,7 +640,7 @@ static struct block_private *block_read(struct super_block *sb, u64 blkno)
if (!test_bit(BLOCK_BIT_UPTODATE, &bp->bits) &&
test_and_clear_bit(BLOCK_BIT_NEW, &bp->bits)) {
ret = block_submit_bio(sb, bp, REQ_OP_READ);
ret = block_submit_bio(sb, bp, READ);
if (ret < 0)
goto out;
}
@@ -940,7 +939,7 @@ int scoutfs_block_writer_write(struct super_block *sb,
/* retry previous write errors */
clear_bit(BLOCK_BIT_ERROR, &bp->bits);
ret = block_submit_bio(sb, bp, REQ_OP_WRITE);
ret = block_submit_bio(sb, bp, WRITE);
if (ret < 0)
break;
}
@@ -1040,17 +1039,6 @@ u64 scoutfs_block_writer_dirty_bytes(struct super_block *sb,
return wri->nr_dirty_blocks * SCOUTFS_BLOCK_LG_SIZE;
}
static unsigned long block_count_objects(struct shrinker *shrink, struct shrink_control *sc)
{
struct block_info *binf = container_of(shrink, struct block_info, shrinker);
struct super_block *sb = binf->sb;
scoutfs_inc_counter(sb, block_cache_scan_objects);
return min_t(u64, (u64)atomic_read(&binf->total_inserted) * SCOUTFS_BLOCK_LG_PAGES_PER,
ULONG_MAX / 2); /* magic numbers as we approach ~0UL :/ */
}
/*
* Remove a number of cached blocks that haven't been used recently.
*
@@ -1071,19 +1059,23 @@ static unsigned long block_count_objects(struct shrinker *shrink, struct shrink_
* atomically remove blocks when the only references are ours and the
* hash table.
*/
static unsigned long block_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
static int block_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
struct block_info *binf = container_of(shrink, struct block_info, shrinker);
struct block_info *binf = container_of(shrink, struct block_info,
shrinker);
struct super_block *sb = binf->sb;
struct rhashtable_iter iter;
struct block_private *bp;
unsigned long freed = 0;
unsigned long nr;
u64 recently;
scoutfs_inc_counter(sb, block_cache_scan_objects);
nr = sc->nr_to_scan;
if (nr == 0)
goto out;
nr = DIV_ROUND_UP(sc->nr_to_scan, SCOUTFS_BLOCK_LG_PAGES_PER);
scoutfs_inc_counter(sb, block_cache_shrink);
nr = DIV_ROUND_UP(nr, SCOUTFS_BLOCK_LG_PAGES_PER);
restart:
recently = accessed_recently(binf);
@@ -1126,7 +1118,6 @@ restart:
if (block_remove_solo(sb, bp)) {
scoutfs_inc_counter(sb, block_cache_shrink_remove);
TRACE_BLOCK(shrink, bp);
freed++;
nr--;
}
block_put(sb, bp);
@@ -1135,8 +1126,9 @@ restart:
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
return freed;
out:
return min_t(u64, (u64)atomic_read(&binf->total_inserted) * SCOUTFS_BLOCK_LG_PAGES_PER,
INT_MAX);
}
struct sm_block_completion {
@@ -1144,11 +1136,11 @@ struct sm_block_completion {
int err;
};
static void KC_DECLARE_BIO_END_IO(sm_block_bio_end_io, struct bio *bio)
static void sm_block_bio_end_io(struct bio *bio, int err)
{
struct sm_block_completion *sbc = bio->bi_private;
sbc->err = kc_bio_get_errno(bio);
sbc->err = err;
complete(&sbc->comp);
bio_put(bio);
}
@@ -1163,8 +1155,9 @@ static void KC_DECLARE_BIO_END_IO(sm_block_bio_end_io, struct bio *bio)
* only layer that sees the full block buffer so we pass the calculated
* crc to the caller for them to check in their context.
*/
static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsigned int opf,
u64 blkno, struct scoutfs_block_header *hdr, size_t len, __le32 *blk_crc)
static int sm_block_io(struct super_block *sb, struct block_device *bdev, int rw, u64 blkno,
struct scoutfs_block_header *hdr, size_t len,
__le32 *blk_crc)
{
struct scoutfs_block_header *pg_hdr;
struct sm_block_completion sbc;
@@ -1178,7 +1171,7 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsign
return -EIO;
if (WARN_ON_ONCE(len > SCOUTFS_BLOCK_SM_SIZE) ||
WARN_ON_ONCE(!op_is_write(opf) && !blk_crc))
WARN_ON_ONCE(!(rw & WRITE) && !blk_crc))
return -EINVAL;
page = alloc_page(GFP_NOFS);
@@ -1187,7 +1180,7 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsign
pg_hdr = page_address(page);
if (op_is_write(opf)) {
if (rw & WRITE) {
memcpy(pg_hdr, hdr, len);
if (len < SCOUTFS_BLOCK_SM_SIZE)
memset((char *)pg_hdr + len, 0,
@@ -1201,9 +1194,8 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsign
goto out;
}
bio->bi_opf = opf | REQ_SYNC;
kc_bio_set_sector(bio, blkno << (SCOUTFS_BLOCK_SM_SHIFT - 9));
bio_set_dev(bio, bdev);
bio->bi_sector = blkno << (SCOUTFS_BLOCK_SM_SHIFT - 9);
bio->bi_bdev = bdev;
bio->bi_end_io = sm_block_bio_end_io;
bio->bi_private = &sbc;
bio_add_page(bio, page, SCOUTFS_BLOCK_SM_SIZE, 0);
@@ -1211,12 +1203,12 @@ static int sm_block_io(struct super_block *sb, struct block_device *bdev, unsign
init_completion(&sbc.comp);
sbc.err = 0;
submit_bio(bio);
submit_bio((rw & WRITE) ? WRITE_SYNC : READ_SYNC, bio);
wait_for_completion(&sbc.comp);
ret = sbc.err;
if (ret == 0 && !op_is_write(opf)) {
if (ret == 0 && !(rw & WRITE)) {
memcpy(hdr, pg_hdr, len);
*blk_crc = block_calc_crc(pg_hdr, SCOUTFS_BLOCK_SM_SIZE);
}
@@ -1230,14 +1222,14 @@ int scoutfs_block_read_sm(struct super_block *sb,
struct scoutfs_block_header *hdr, size_t len,
__le32 *blk_crc)
{
return sm_block_io(sb, bdev, REQ_OP_READ, blkno, hdr, len, blk_crc);
return sm_block_io(sb, bdev, READ, blkno, hdr, len, blk_crc);
}
int scoutfs_block_write_sm(struct super_block *sb,
struct block_device *bdev, u64 blkno,
struct scoutfs_block_header *hdr, size_t len)
{
return sm_block_io(sb, bdev, REQ_OP_WRITE, blkno, hdr, len, NULL);
return sm_block_io(sb, bdev, WRITE, blkno, hdr, len, NULL);
}
int scoutfs_block_setup(struct super_block *sb)
@@ -1262,8 +1254,7 @@ int scoutfs_block_setup(struct super_block *sb)
atomic_set(&binf->total_inserted, 0);
atomic64_set(&binf->access_counter, 0);
init_waitqueue_head(&binf->waitq);
KC_INIT_SHRINKER_FUNCS(struct block_info, shrinker,
&binf->shrinker, block_count_objects, block_scan_objects);
binf->shrinker.shrink = block_shrink;
binf->shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&binf->shrinker);
INIT_WORK(&binf->free_work, block_free_work);

View File

@@ -30,8 +30,6 @@
EXPAND_COUNTER(block_cache_free) \
EXPAND_COUNTER(block_cache_free_work) \
EXPAND_COUNTER(block_cache_remove_stale) \
EXPAND_COUNTER(block_cache_count_objects) \
EXPAND_COUNTER(block_cache_scan_objects) \
EXPAND_COUNTER(block_cache_shrink) \
EXPAND_COUNTER(block_cache_shrink_next) \
EXPAND_COUNTER(block_cache_shrink_recent) \
@@ -237,12 +235,12 @@ struct scoutfs_counters {
#define SCOUTFS_PCPU_COUNTER_BATCH (1 << 30)
#define scoutfs_inc_counter(sb, which) \
percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, 1, \
SCOUTFS_PCPU_COUNTER_BATCH)
__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, 1, \
SCOUTFS_PCPU_COUNTER_BATCH)
#define scoutfs_add_counter(sb, which, cnt) \
percpu_counter_add_batch(&SCOUTFS_SB(sb)->counters->which, cnt, \
SCOUTFS_PCPU_COUNTER_BATCH)
__percpu_counter_add(&SCOUTFS_SB(sb)->counters->which, cnt, \
SCOUTFS_PCPU_COUNTER_BATCH)
void __init scoutfs_init_counters(void);
int scoutfs_setup_counters(struct super_block *sb);

View File

@@ -1,23 +0,0 @@
#include "kernelcompat.h"
#ifdef KC_SHRINKER_SHRINK
#include <linux/shrinker.h>
/*
* If a target doesn't have that .{count,scan}_objects() interface then
* we have a .shrink() helper that performs the shrink work in terms of
* count/scan.
*/
int kc_shrink_wrapper(struct shrinker *shrink, struct shrink_control *sc)
{
struct kc_shrinker_funcs *funcs = KC_SHRINKER_FUNCS(shrink);
unsigned long nr;
if (sc->nr_to_scan != 0)
funcs->scan_objects(shrink, sc);
nr = funcs->count_objects(shrink, sc);
return min_t(unsigned long, nr, INT_MAX);
}
#endif

View File

@@ -46,81 +46,4 @@ static inline int dir_emit_dots(struct file *file, void *dirent,
}
#endif
#ifndef KC_PERCPU_COUNTER_ADD_BATCH
#define percpu_counter_add_batch __percpu_counter_add
#endif
#ifndef KC_MEMALLOC_NOFS_SAVE
#define memalloc_nofs_save memalloc_noio_save
#define memalloc_nofs_restore memalloc_noio_restore
#endif
#ifdef KC_BIO_BI_OPF
#define kc_bio_get_opf(bio) \
({ \
(bio)->bi_opf; \
})
#define kc_bio_set_opf(bio, opf) \
do { \
(bio)->bi_opf = opf; \
} while (0)
#define kc_bio_set_sector(bio, sect) \
do { \
(bio)->bi_iter.bi_sector = sect;\
} while (0)
#else
#define kc_bio_get_opf(bio) \
({ \
(bio)->bi_rw; \
})
#define kc_bio_set_opf(bio, opf) \
do { \
(bio)->bi_rw = opf; \
} while (0)
#define kc_bio_set_sector(bio, sect) \
do { \
(bio)->bi_sector = sect; \
} while (0)
#endif
#ifdef KC_BIO_BI_STATUS
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio)
#define kc_bio_get_errno(bio) ({ blk_status_to_errno((bio)->bi_status); })
#else
#define KC_DECLARE_BIO_END_IO(name, bio) name(bio, int _error_arg)
#define kc_bio_get_errno(bio) ({ (int)((void)(bio), _error_arg); })
#endif
#ifndef KC_SHRINKER_SHRINK
#define KC_DEFINE_SHRINKER(name) struct shrinker name
#define KC_INIT_SHRINKER_FUNCS(type, name, shrink, count, scan) do { \
__typeof__(shrink) _shrink = (shrink); \
_shrink->count_objects = count; \
_shrink->scan_objects = scan; \
} while (0)
#else
#include <linux/shrinker.h>
struct kc_shrinker_funcs {
unsigned long (*count_objects)(struct shrinker *, struct shrink_control *sc);
unsigned long (*scan_objects)(struct shrinker *, struct shrink_control *sc);
};
/* using adjacent member of an unnamed struct */
#define KC_DEFINE_SHRINKER(name) \
struct { \
struct kc_shrinker_funcs shrinker_funcs; \
struct shrinker name; \
}
#define KC_SHRINKER_FUNCS(shrink) \
((void *)((long)(shrink) - sizeof(struct kc_shrinker_funcs)))
#define KC_INIT_SHRINKER_FUNCS(type, name, shrink, count, scan) do { \
BUILD_BUG_ON(offsetof(type, shrinker_funcs) + \
sizeof(struct kc_shrinker_funcs) != offsetof(type, name)); \
struct kc_shrinker_funcs *_funcs = KC_SHRINKER_FUNCS(shrink); \
__typeof__(shrink) _shrink = (shrink); \
_funcs->count_objects = count; \
_funcs->scan_objects = scan; \
_shrink->shrink = kc_shrink_wrapper; \
} while (0)
#endif
#endif

View File

@@ -355,7 +355,6 @@ static int submit_send(struct super_block *sb,
}
if (rid != 0) {
spin_unlock(&conn->lock);
kfree(msend);
return -ENOTCONN;
}
}
@@ -1346,12 +1345,10 @@ scoutfs_net_alloc_conn(struct super_block *sb,
if (!conn)
return NULL;
if (info_size) {
conn->info = kzalloc(info_size, GFP_NOFS);
if (!conn->info) {
kfree(conn);
return NULL;
}
conn->info = kzalloc(info_size, GFP_NOFS);
if (!conn->info) {
kfree(conn);
return NULL;
}
conn->workq = alloc_workqueue("scoutfs_net_%s",

View File

@@ -157,15 +157,6 @@ static int free_rid(struct omap_rid_list *list, struct omap_rid_entry *entry)
return nr;
}
static void free_rid_list(struct omap_rid_list *list)
{
struct omap_rid_entry *entry;
struct omap_rid_entry *tmp;
list_for_each_entry_safe(entry, tmp, &list->head, head)
free_rid(list, entry);
}
static int copy_rids(struct omap_rid_list *to, struct omap_rid_list *from, spinlock_t *from_lock)
{
struct omap_rid_entry *entry;
@@ -813,10 +804,6 @@ void scoutfs_omap_server_shutdown(struct super_block *sb)
llist_for_each_entry_safe(req, tmp, requests, llnode)
kfree(req);
spin_lock(&ominf->lock);
free_rid_list(&ominf->rids);
spin_unlock(&ominf->lock);
synchronize_rcu();
}
@@ -877,10 +864,6 @@ void scoutfs_omap_destroy(struct super_block *sb)
rhashtable_walk_stop(&iter);
rhashtable_walk_exit(&iter);
spin_lock(&ominf->lock);
free_rid_list(&ominf->rids);
spin_unlock(&ominf->lock);
rhashtable_destroy(&ominf->group_ht);
rhashtable_destroy(&ominf->req_ht);
kfree(ominf);

View File

@@ -496,7 +496,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
ret = assign_random_id(sbi);
if (ret < 0)
goto out;
return ret;
spin_lock_init(&sbi->next_ino_lock);
spin_lock_init(&sbi->data_wait_root.lock);
@@ -505,7 +505,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
/* parse options early for use during setup */
ret = scoutfs_options_early_setup(sb, data);
if (ret < 0)
goto out;
return ret;
scoutfs_options_read(sb, &opts);
ret = sb_set_blocksize(sb, SCOUTFS_BLOCK_SM_SIZE);

View File

@@ -0,0 +1,140 @@
#!/usr/bin/bash
# /usr/libexec/scoutfs-fenced/run/ipmi-remote-host
# ipmi configuration
SCOUTFS_IPMI_CONFIG_FILE=${SCOUTFS_IPMI_CONFIG_FILE:-/etc/scoutfs/scoutfs-ipmi.conf}
SCOUTFS_IPMI_HOSTS_FILE=${SCOUTFS_IPMI_HOSTS_FILE:-/etc/scoutfs/scoutfs-ipmi-hosts.conf}
## hosts file format
## SCOUTFS_HOST_IP IPMI_ADDRESS
## ex:
# 192.168.1.1 192.168.10.1
# command setup
IPMI_POWER="/sbin/ipmipower"
SSH_CMD="ssh -o ConnectTimeout=3 -o BatchMode=yes -o StrictHostKeyChecking=no"
LOGGER="/bin/logger -p local3.crit -t scoutfs-fenced"
$LOGGER "ipmi fence script invoked: IP: $SCOUTFS_FENCED_REQ_IP RID: $SCOUTFS_FENCED_REQ_RID TEST: $IPMITEST"
echo_fail() {
echo "$@" >&2
$LOGGER "fence failed: $@"
exit 1
}
echo_log() {
echo "$@" >&2
$LOGGER "fence info: $@"
}
echo_test_pass() {
echo -e "\xE2\x9C\x94 $@"
}
echo_test_fail() {
echo -e "\xE2\x9D\x8C $@"
}
test -n "$SCOUTFS_IPMI_CONFIG_FILE" || \
echo_fail "SCOUTFS_IPMI_CONFIG_FILE isn't set"
test -r "$SCOUTFS_IPMI_CONFIG_FILE" || \
echo_fail "$SCOUTFS_IPMI_CONFIG_FILE isn't readable file"
. "$SCOUTFS_IPMI_CONFIG_FILE"
test -n "$SCOUTFS_IPMI_HOSTS_FILE" || \
echo_fail "SCOUTFS_IPMI_HOSTS_FILE isn't set"
test -r "$SCOUTFS_IPMI_HOSTS_FILE" || \
echo_fail "$SCOUTFS_IPMI_HOSTS_FILE isn't readable file"
test -x "$IPMI_POWER" || \
echo_fail "$IPMI_POWER not found, need to install freeimpi?"
export ip="$SCOUTFS_FENCED_REQ_IP"
export rid="$SCOUTFS_FENCED_REQ_RID"
getIPMIhost () {
host=$(awk -v ip="$1" '$1 == ip {print $2}' "$SCOUTFS_IPMI_HOSTS_FILE") || \
echo_fail "lookup ipmi host failed"
echo "$host"
}
powerOffHost() {
# older versions of ipmipower inverted wait-until-off/wait-until-on, so specify both
$IPMI_POWER $IPMI_OPTS -h "$1" --wait-until-off --wait-until-on --off || \
echo_fail "ipmi power off $1 failed"
ipmioutput=$($IPMI_POWER $IPMI_OPTS -h "$1" --stat) || \
echo_fail "ipmi power stat $1 failed"
if [[ ! "$ipmioutput" =~ off ]]; then
echo_fail "ipmi stat $1 not off"
fi
$LOGGER "ipmi fence power down $1 success"
exit 0
}
if [ -n "$IPMITEST" ]; then
for i in $(awk '!/^($|[[:space:]]*#)/ {print $1}' "$SCOUTFS_IPMI_HOSTS_FILE"); do
if ! $SSH_CMD "$i" /bin/true; then
echo_test_fail "ssh $i"
else
echo_test_pass "ssh $i"
fi
host=$(getIPMIhost "$i")
if [ -z "$host" ]; then
echo_test_fail "ipmi config $i $host"
else
if ! $IPMI_POWER $IPMI_OPTS -h "$host" --stat; then
echo_test_fail "ipmi $i"
else
echo_test_pass "ipmi $i"
fi
fi
done
exit 0
fi
if [ -z "$ip" ]; then
echo_fail "no IP given for fencing"
fi
host=$(getIPMIhost "$ip")
if [ -z "$host" ]; then
echo_fail "no IPMI host found for fence IP"
fi
# first check via ssh if the mount still exists
# if ssh succeeds, we will only power down the node if mounted
if ! output=$($SSH_CMD "$ip" "echo BEGIN; LC_ALL=C egrep -m 1 '(^0x*|^$rid$)' /sys/kernel/boot_params/version /sys/fs/scoutfs/f*r*/rid; echo END"); then
# ssh not working, just power down host
powerOffHost "$host"
fi
if [[ ! "$output" =~ BEGIN ]]; then
# ssh failure
echo_log "no BEGIN"
powerOffHost "$host"
fi
if [[ ! "$output" =~ \/boot_params\/ ]]; then
# ssh failure
echo_log "no boot params"
powerOffHost "$host"
fi
if [[ ! "$output" =~ END ]]; then
# ssh failure
echo_log "no END"
powerOffHost "$host"
fi
if [[ "$output" =~ "rid:$rid" ]]; then
# rid still mounted, power down
echo_log "rid $rid still mounted"
powerOffHost "$host"
fi
$LOGGER "ipmi fence host $ip/$host success (rid $rid not mounted)"
exit 0

View File

@@ -0,0 +1,36 @@
#!/usr/bin/bash
# /usr/libexec/scoutfs-fenced/run/local-force-umount
echo_fail() {
echo "$@" > /dev/stderr
exit 1
}
rid="$SCOUTFS_FENCED_REQ_RID"
#
# Look for a local mount with the rid to fence. Typically we'll at
# least find the mount with the server that requested the fence that
# we're processing. But it's possible that mounts are unmounted
# before, or while, we're running.
#
mnts=$(findmnt -l -n -t scoutfs -o TARGET) || \
echo_fail "findmnt -t scoutfs failed" > /dev/stderr
for mnt in $mnts; do
mnt_rid=$(scoutfs statfs -p "$mnt" -s rid) || \
echo_fail "scoutfs statfs $mnt failed"
if [ "$mnt_rid" == "$rid" ]; then
umount -f "$mnt" || \
echo_fail "umout -f $mnt"
exit 0
fi
done
#
# If the mount doesn't exist on this host then it can't access the
# devices by definition and can be considered fenced.
#
exit 0

View File

@@ -0,0 +1,139 @@
#!/usr/bin/bash
# /usr/libexec/scoutfs-fenced/run/powerman-remote-host
# powerman configuration
SCOUTFS_PM_CONFIG_FILE=${SCOUTFS_PM_CONFIG_FILE:-/etc/scoutfs/scoutfs-pm.conf}
SCOUTFS_PM_HOSTS_FILE=${SCOUTFS_PM_HOSTS_FILE:-/etc/scoutfs/scoutfs-pm-hosts.conf}
## hosts file format
## SCOUTFS_HOST_IP POWERMAN_NODE_NAME
## ex:
# 192.168.1.1 dm1
# command setup
PM_CMD="/usr/bin/pm"
SSH_CMD="ssh -o ConnectTimeout=3 -o BatchMode=yes -o StrictHostKeyChecking=no"
LOGGER="/bin/logger -p local3.crit -t scoutfs-fenced"
$LOGGER "ipmi fence script invoked: IP: $SCOUTFS_FENCED_REQ_IP RID: $SCOUTFS_FENCED_REQ_RID TEST: $IPMITEST"
echo_fail() {
echo "$@" >&2
$LOGGER "fence failed: $@"
exit 1
}
echo_log() {
echo "$@" >&2
$LOGGER "fence info: $@"
}
echo_test_pass() {
echo -e "\xE2\x9C\x94 $@"
}
echo_test_fail() {
echo -e "\xE2\x9D\x8C $@"
}
test -n "$SCOUTFS_PM_CONFIG_FILE" || \
echo_fail "SCOUTFS_PM_CONFIG_FILE isn't set"
test -r "$SCOUTFS_PM_CONFIG_FILE" || \
echo_fail "$SCOUTFS_PM_CONFIG_FILE isn't readable file"
. "$SCOUTFS_PM_CONFIG_FILE"
test -n "$SCOUTFS_PM_HOSTS_FILE" || \
echo_fail "SCOUTFS_PM_HOSTS_FILE isn't set"
test -r "$SCOUTFS_PM_HOSTS_FILE" || \
echo_fail "$SCOUTFS_PM_HOSTS_FILE isn't readable file"
test -x "$PM_CMD" || \
echo_fail "$PMCMD not found, need to install powerman?"
export ip="$SCOUTFS_FENCED_REQ_IP"
rid="$SCOUTFS_FENCED_REQ_RID"
getPMhost () {
host=$(awk -v ip="$1" '$1 == ip {print $2}' "$SCOUTFS_PM_HOSTS_FILE") || \
echo_fail "lookup pm host failed"
echo "$host"
}
powerOffHost() {
$PM_CMD $PM_OPTS "$1" -0 || \
echo_fail "pm power off $host failed"
pmoutput=$($PM_CMD $PM_OPTS "$1" -q | grep "$1") || \
echo_fail "powerman power stat $1 failed"
if [[ ! "$pmoutput" =~ off ]]; then
echo_fail "powerman stat $1 not off"
fi
$LOGGER "powerman fence power down $1 success"
exit 0
}
if [ -n "$PMTEST" ]; then
for i in $(awk '!/^($|[[:space:]]*#)/ {print $1}' "$SCOUTFS_PM_HOSTS_FILE"); do
if ! $SSH_CMD "$i" /bin/true; then
echo_test_fail "ssh $i"
else
echo_test_pass "ssh $i"
fi
host=$(getPMhost "$i")
if [ -z "$host" ]; then
echo_test_fail "pm config $i $host"
else
if ! $PM_CMD $PM_OPTS "$host" -q; then
echo_test_fail "pm $i"
else
echo_test_pass "pm $i"
fi
fi
done
exit 0
fi
if [ -z "$ip" ]; then
echo_fail "no IP given for fencing"
fi
host=$(getPMhost "$ip")
if [ -z "$host" ]; then
echo_fail "no host found for fence IP"
fi
# first check via ssh if the mount still exists
# if ssh succeeds, we will only power down the node if mounted
if ! output=$($SSH_CMD "$ip" "echo BEGIN; LC_ALL=C egrep -m 1 '(^0x*|^$rid$)' /sys/kernel/boot_params/version /sys/fs/scoutfs/f*r*/rid; echo END"); then
# ssh not working, just power down host
powerOffHost "$host"
fi
if [[ ! "$output" =~ BEGIN ]]; then
# ssh failure
echo_log "no BEGIN"
powerOffHost "$host"
fi
if [[ ! "$output" =~ \/boot_params\/ ]]; then
# ssh failure
echo_log "no boot params"
powerOffHost "$host"
fi
if [[ ! "$output" =~ END ]]; then
# ssh failure
echo_log "no END"
powerOffHost "$host"
fi
if [[ "$output" =~ "rid:$rid" ]]; then
# rid still mounted, power down
echo_log "rid $rid still mounted"
powerOffHost "$host"
fi
$LOGGER "powerman fence host $ip/$host success (rid $rid not mounted)"
exit 0

View File

@@ -0,0 +1,11 @@
# /etc/scoutfs/scoutfs-ipmi-hosts.conf
## config file format
##
## SCOUTFS_HOST_IP must match the interface used for scoutfs
## leader/follower communications
##
## SCOUTFS_HOST_IP IPMI_ADDRESS
## ex:
#192.168.1.1 192.168.10.1

View File

@@ -0,0 +1,10 @@
#!/usr/bin/bash
# /etc/scoutfs/scoutfs-ipmi.conf
IPMI_USER="admin"
IPMI_PASSWORD="password"
IPMI_OPTS="-D LAN_2_0 -u $IPMI_USER -p $IPMI_PASSWORD"
# some Intel BMCs need -I 17
# IPMI_OPTS="-D LAN_2_0 -u $IPMI_USER -p $IPMI_PASSWORD -I 17"

View File

@@ -0,0 +1,11 @@
# /etc/scoutfs/scoutfs-ipmi-hosts.conf
## config file format
##
## SCOUTFS_HOST_IP must match the interface used for scoutfs
## leader/follower communications
##
## SCOUTFS_HOST_IP POWERMAN_NODE_NAME
## ex:
#192.168.1.1 node1

View File

@@ -0,0 +1,8 @@
#!/usr/bin/bash
# /etc/scoutfs/scoutfs-pm.conf
PM_OPTS=""
# optionally specify remote powerman server
#PM_OPTS="-h pm-server.localdomain"

View File

@@ -597,7 +597,7 @@ format.
.PD
.TP
.BI "print {-S|--skip-likely-huge} META-DEVICE"
.BI "print META-DEVICE"
.sp
Prints out all of the metadata in the file system. This makes no effort
to ensure that the structures are consistent as they're traversed and
@@ -607,20 +607,6 @@ output.
.PD 0
.TP
.sp
.B "-S, --skip-likely-huge"
Skip printing structures that are likely to be very large. The skipped
structures tend to be global structures whose size scales with the size
of the volume; examples include the global fs items, srch files, and the
metadata and data allocators. Similar structures that are not skipped
scale with the number of mounts and are maintained at a relatively
reasonable size; these include the per-mount log trees, srch files,
allocators, and the metadata allocators used by server commits.
.sp
Skipping the larger structures limits the print output to a relatively
constant size, rather than a large multiple of the volume's used
metadata space, which makes the output much more useful for inspection.
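An illustrative invocation of the option described above (the metadata device path here is hypothetical):

scoutfs print --skip-likely-huge /dev/mapper/scoutfs-meta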
.TP
.B "META-DEVICE"
The path to the metadata device for the filesystem whose metadata will be
printed. Since this command reads via the host's buffer cache, it may not

View File

@@ -55,14 +55,21 @@ install -m 755 -D src/scoutfs $RPM_BUILD_ROOT%{_sbindir}/scoutfs
install -m 644 -D src/ioctl.h $RPM_BUILD_ROOT%{_includedir}/scoutfs/ioctl.h
install -m 644 -D src/format.h $RPM_BUILD_ROOT%{_includedir}/scoutfs/format.h
install -m 755 -D fenced/scoutfs-fenced $RPM_BUILD_ROOT%{_libexecdir}/scoutfs-fenced/scoutfs-fenced
install -m 755 -D fenced/local-force-unmount $RPM_BUILD_ROOT%{_libexecdir}/scoutfs-fenced/run/local-force-unmount
install -m 755 -D fenced/ipmi-remote-host $RPM_BUILD_ROOT%{_libexecdir}/scoutfs-fenced/run/ipmi-remote-host
install -m 755 -D fenced/powerman-remote-host $RPM_BUILD_ROOT%{_libexecdir}/scoutfs-fenced/run/powerman-remote-host
install -m 644 -D fenced/scoutfs-fenced.service $RPM_BUILD_ROOT%{_unitdir}/scoutfs-fenced.service
install -m 644 -D fenced/scoutfs-fenced.conf.example $RPM_BUILD_ROOT%{_sysconfdir}/scoutfs/scoutfs-fenced.conf.example
install -m 644 -D fenced/scoutfs-ipmi.conf $RPM_BUILD_ROOT%{_sysconfdir}/scoutfs/scoutfs-ipmi.conf
install -m 644 -D fenced/scoutfs-ipmi-hosts.conf $RPM_BUILD_ROOT%{_sysconfdir}/scoutfs/scoutfs-ipmi-hosts.conf
install -m 644 -D fenced/scoutfs-pm.conf $RPM_BUILD_ROOT%{_sysconfdir}/scoutfs/scoutfs-pm.conf
install -m 644 -D fenced/scoutfs-pm-hosts.conf $RPM_BUILD_ROOT%{_sysconfdir}/scoutfs/scoutfs-pm-hosts.conf
%files
%defattr(644,root,root,755)
%{_mandir}/man*/scoutfs*.gz
%{_unitdir}/scoutfs-fenced.service
%{_sysconfdir}/scoutfs
%config(noreplace) %{_sysconfdir}/scoutfs
%defattr(755,root,root,755)
%{_sbindir}/scoutfs
%{_libexecdir}/scoutfs-fenced

View File

@@ -8,7 +8,6 @@
#include <errno.h>
#include <string.h>
#include <stdarg.h>
#include <stdbool.h>
#include <ctype.h>
#include <uuid/uuid.h>
#include <sys/socket.h>
@@ -990,10 +989,9 @@ static void print_super_block(struct scoutfs_super_block *super, u64 blkno)
struct print_args {
char *meta_device;
bool skip_likely_huge;
};
static int print_volume(int fd, struct print_args *args)
static int print_volume(int fd)
{
struct scoutfs_super_block *super = NULL;
struct print_recursion_args pa;
@@ -1043,26 +1041,23 @@ static int print_volume(int fd, struct print_args *args)
ret = err;
}
if (!args->skip_likely_huge) {
for (i = 0; i < array_size(super->meta_alloc); i++) {
snprintf(str, sizeof(str), "meta_alloc[%u]", i);
err = print_btree(fd, super, str, &super->meta_alloc[i].root,
print_alloc_item, NULL);
if (err && !ret)
ret = err;
}
err = print_btree(fd, super, "data_alloc", &super->data_alloc.root,
for (i = 0; i < array_size(super->meta_alloc); i++) {
snprintf(str, sizeof(str), "meta_alloc[%u]", i);
err = print_btree(fd, super, str, &super->meta_alloc[i].root,
print_alloc_item, NULL);
if (err && !ret)
ret = err;
}
err = print_btree(fd, super, "data_alloc", &super->data_alloc.root,
print_alloc_item, NULL);
if (err && !ret)
ret = err;
err = print_btree(fd, super, "srch_root", &super->srch_root,
print_srch_root_item, NULL);
if (err && !ret)
ret = err;
err = print_btree(fd, super, "logs_root", &super->logs_root,
print_log_trees_item, NULL);
if (err && !ret)
@@ -1070,23 +1065,19 @@ static int print_volume(int fd, struct print_args *args)
pa.super = super;
pa.fd = fd;
if (!args->skip_likely_huge) {
err = print_btree_leaf_items(fd, super, &super->srch_root.ref,
print_srch_root_files, &pa);
if (err && !ret)
ret = err;
}
err = print_btree_leaf_items(fd, super, &super->srch_root.ref,
print_srch_root_files, &pa);
if (err && !ret)
ret = err;
err = print_btree_leaf_items(fd, super, &super->logs_root.ref,
print_log_trees_roots, &pa);
if (err && !ret)
ret = err;
if (!args->skip_likely_huge) {
err = print_btree(fd, super, "fs_root", &super->fs_root,
print_fs_item, NULL);
if (err && !ret)
ret = err;
}
err = print_btree(fd, super, "fs_root", &super->fs_root,
print_fs_item, NULL);
if (err && !ret)
ret = err;
out:
free(super);
@@ -1107,7 +1098,7 @@ static int do_print(struct print_args *args)
return ret;
}
ret = print_volume(fd, args);
ret = print_volume(fd);
close(fd);
return ret;
};
@@ -1117,9 +1108,6 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
struct print_args *args = state->input;
switch (key) {
case 'S':
args->skip_likely_huge = true;
break;
case ARGP_KEY_ARG:
if (!args->meta_device)
args->meta_device = strdup_or_error(state, arg);
@@ -1137,13 +1125,8 @@ static int parse_opt(int key, char *arg, struct argp_state *state)
return 0;
}
static struct argp_option options[] = {
{ "skip-likely-huge", 'S', NULL, 0, "Skip large structures to minimize output size"},
{ NULL }
};
static struct argp argp = {
options,
NULL,
parse_opt,
"META-DEV",
"Print metadata structures"