From ccefffe74fc66118df094b6d008b71f2f8fe68cc Mon Sep 17 00:00:00 2001
From: Zach Brown
Date: Thu, 21 Sep 2017 13:03:54 -0700
Subject: [PATCH] scoutfs: add item, range, lock alloc/free counters

Add some counters to track allocation and freeing of our structures
that are subject to shrinking.  This lets us eyeball the counters to
see if we have runaway leaks.

Signed-off-by: Zach Brown
---
 kmod/src/counters.h | 10 ++++++++--
 kmod/src/item.c     |  7 +++++++
 kmod/src/lock.c     |  3 +++
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/kmod/src/counters.h b/kmod/src/counters.h
index 9fe291c0..86b5afa2 100644
--- a/kmod/src/counters.h
+++ b/kmod/src/counters.h
@@ -31,10 +31,14 @@
 	EXPAND_COUNTER(data_invalidatepage)		\
 	EXPAND_COUNTER(data_writepage)			\
 	EXPAND_COUNTER(data_end_writeback_page)		\
+	EXPAND_COUNTER(item_alloc)			\
+	EXPAND_COUNTER(item_free)			\
 	EXPAND_COUNTER(item_create)			\
 	EXPAND_COUNTER(item_lookup_hit)			\
 	EXPAND_COUNTER(item_lookup_miss)		\
 	EXPAND_COUNTER(item_delete)			\
+	EXPAND_COUNTER(item_range_alloc)		\
+	EXPAND_COUNTER(item_range_free)			\
 	EXPAND_COUNTER(item_range_hit)			\
 	EXPAND_COUNTER(item_range_miss)			\
 	EXPAND_COUNTER(item_range_insert)		\
@@ -43,10 +47,12 @@
 	EXPAND_COUNTER(item_shrink_dirty_abort)		\
 	EXPAND_COUNTER(item_shrink_skip_inced)		\
 	EXPAND_COUNTER(item_shrink_range)		\
-	EXPAND_COUNTER(item_shrink)
+	EXPAND_COUNTER(item_shrink)			\
+	EXPAND_COUNTER(lock_alloc)			\
+	EXPAND_COUNTER(lock_free)
 
 #define FIRST_COUNTER	alloc_alloc
-#define LAST_COUNTER	item_shrink
+#define LAST_COUNTER	lock_free
 
 #undef EXPAND_COUNTER
 #define EXPAND_COUNTER(which) struct percpu_counter which;
diff --git a/kmod/src/item.c b/kmod/src/item.c
index cb3fee9a..aed13730 100644
--- a/kmod/src/item.c
+++ b/kmod/src/item.c
@@ -103,6 +103,7 @@ static u8 item_flags(struct cached_item *item)
 static void free_item(struct super_block *sb, struct cached_item *item)
 {
 	if (!IS_ERR_OR_NULL(item)) {
+		scoutfs_inc_counter(sb, item_free);
 		WARN_ON_ONCE(!list_empty(&item->entry));
 		WARN_ON_ONCE(!RB_EMPTY_NODE(&item->node));
 		scoutfs_key_free(sb, item->key);
@@ -133,6 +134,9 @@
 		}
 	}
 
+	if (item)
+		scoutfs_inc_counter(sb, item_alloc);
+
 	return item;
 }
 
@@ -563,6 +567,7 @@ static bool check_range(struct super_block *sb, struct rb_root *root,
 static void free_range(struct super_block *sb, struct cached_range *rng)
 {
 	if (!IS_ERR_OR_NULL(rng)) {
+		scoutfs_inc_counter(sb, item_range_free);
 		scoutfs_key_free(sb, rng->start);
 		scoutfs_key_free(sb, rng->end);
 		kfree(rng);
@@ -1099,6 +1104,7 @@ int scoutfs_item_insert_batch(struct super_block *sb, struct list_head *list,
 	if (WARN_ON_ONCE(scoutfs_key_compare(start, end) > 0))
 		return -EINVAL;
 
+	scoutfs_inc_counter(sb, item_range_alloc);
 	rng = kzalloc(sizeof(struct cached_range), GFP_NOFS);
 	if (rng) {
 		rng->start = scoutfs_key_dup(sb, start);
@@ -1657,6 +1663,7 @@ int scoutfs_item_invalidate(struct super_block *sb,
 
 	/* XXX think about racing with trans write */
 
+	scoutfs_inc_counter(sb, item_range_alloc);
 	rng = kzalloc(sizeof(struct cached_range), GFP_NOFS);
 	if (rng) {
 		rng->start = scoutfs_key_alloc(sb, SCOUTFS_MAX_KEY_SIZE);
diff --git a/kmod/src/lock.c b/kmod/src/lock.c
index 994121a5..acd60e6c 100644
--- a/kmod/src/lock.c
+++ b/kmod/src/lock.c
@@ -27,6 +27,7 @@
 #include "dlmglue.h"
 #include "inode.h"
 #include "trans.h"
+#include "counters.h"
 
 #define LN_FMT "%u.%u.%u.%llu.%llu"
 #define LN_ARG(name) \
@@ -107,6 +108,7 @@ static void free_scoutfs_lock(struct scoutfs_lock *lock)
 
 	if (lock) {
 		linfo = SCOUTFS_SB(lock->sb)->lock_info;
+		scoutfs_inc_counter(lock->sb, lock_free);
 		ocfs2_lock_res_free(&lock->lockres);
 		scoutfs_key_free(lock->sb, lock->start);
 		scoutfs_key_free(lock->sb, lock->end);
@@ -294,6 +296,7 @@ search:
 		found->sequence = ++linfo->seq_cnt;
 		rb_link_node(&found->node, parent, node);
 		rb_insert_color(&found->node, &linfo->lock_tree);
+		scoutfs_inc_counter(sb, lock_alloc);
 	}
 	found->refcnt++;
 	if (test_bit(SCOUTFS_LOCK_RECLAIM, &found->flags)) {
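
For readers new to the pattern: counters.h defines every counter exactly once
in an X-macro list and re-expands that list wherever the names are needed, so
adding a counter is a one-line change plus the increments.  Below is a minimal
userspace sketch of that technique, not scoutfs code: it swaps the kernel's
struct percpu_counter for a plain long, and EXPAND_EACH_COUNTER, inc_counter,
and print_counters are hypothetical stand-ins for the scoutfs helpers.

#include <stdio.h>

/* one list of counter names; hypothetical analogue of counters.h */
#define EXPAND_EACH_COUNTER			\
	EXPAND_COUNTER(item_alloc)		\
	EXPAND_COUNTER(item_free)		\
	EXPAND_COUNTER(lock_alloc)		\
	EXPAND_COUNTER(lock_free)

/* first expansion: one struct field per counter */
#define EXPAND_COUNTER(which) long which;
struct counters {
	EXPAND_EACH_COUNTER
};
#undef EXPAND_COUNTER

/* stand-in for scoutfs_inc_counter(); plain long, not percpu */
#define inc_counter(c, which) ((c)->which++)

static void print_counters(struct counters *c)
{
	/* second expansion: one print statement per counter */
#define EXPAND_COUNTER(which) printf("%-12s %ld\n", #which, c->which);
	EXPAND_EACH_COUNTER
#undef EXPAND_COUNTER
}

int main(void)
{
	struct counters c = { 0 };

	inc_counter(&c, item_alloc);
	inc_counter(&c, item_alloc);
	inc_counter(&c, item_free);
	print_counters(&c);	/* item_alloc 2, item_free 1, locks 0 */
	return 0;
}

With alloc/free pairs in the list, the leak check the commit message describes
is just a comparison: once a workload quiesces and the shrinker runs,
item_alloc should settle back toward item_free (likewise lock_alloc and
lock_free), and a steadily growing gap points at a leak.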