scoutfs: add item, range, lock alloc/free counters

Add some counters to track allocation and freeing of our structures that
are subject to shrinking.  This lets us eyeball the counters to see if
we have runaway leaks.
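
For readers not familiar with the counter machinery: counters.h builds everything from a single x-macro list, so adding a counter is just adding one EXPAND_COUNTER() line. Below is a stand-alone illustration of that pattern, in userspace C with plain integers standing in for the kernel's struct percpu_counter; the EXPAND_EACH_COUNTER name and the print helper are illustrative, not the scoutfs code.

#include <stdint.h>
#include <stdio.h>

/* one list of counter names, expanded differently in each place it is used */
#define EXPAND_EACH_COUNTER		\
	EXPAND_COUNTER(item_alloc)	\
	EXPAND_COUNTER(item_free)	\
	EXPAND_COUNTER(lock_alloc)	\
	EXPAND_COUNTER(lock_free)

/* first expansion: one struct field per counter name */
#define EXPAND_COUNTER(which) uint64_t which;
struct counters {
	EXPAND_EACH_COUNTER
};
#undef EXPAND_COUNTER

/* second expansion: dump every counter by name without repeating the list */
#define EXPAND_COUNTER(which) \
	printf(#which " %llu\n", (unsigned long long)c->which);
static void print_counters(const struct counters *c)
{
	EXPAND_EACH_COUNTER
}
#undef EXPAND_COUNTER

int main(void)
{
	struct counters c = { .item_alloc = 3, .item_free = 2 };

	print_counters(&c);	/* prints item_alloc 3, item_free 2, lock_* 0 */
	return 0;
}

In the kernel code the fields are struct percpu_counter and increments go through scoutfs_inc_counter(), but the expansion trick is the same.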

Signed-off-by: Zach Brown <zab@versity.com>
Zach Brown, 2017-09-21 13:03:54 -07:00 (committed by Mark Fasheh)
parent 15aa09b0c2, commit ccefffe74f
3 changed files with 18 additions and 2 deletions

View File

@@ -31,10 +31,14 @@
 	EXPAND_COUNTER(data_invalidatepage) \
 	EXPAND_COUNTER(data_writepage) \
 	EXPAND_COUNTER(data_end_writeback_page) \
+	EXPAND_COUNTER(item_alloc) \
+	EXPAND_COUNTER(item_free) \
 	EXPAND_COUNTER(item_create) \
 	EXPAND_COUNTER(item_lookup_hit) \
 	EXPAND_COUNTER(item_lookup_miss) \
 	EXPAND_COUNTER(item_delete) \
+	EXPAND_COUNTER(item_range_alloc) \
+	EXPAND_COUNTER(item_range_free) \
 	EXPAND_COUNTER(item_range_hit) \
 	EXPAND_COUNTER(item_range_miss) \
 	EXPAND_COUNTER(item_range_insert) \
@@ -43,10 +47,12 @@
 	EXPAND_COUNTER(item_shrink_dirty_abort) \
 	EXPAND_COUNTER(item_shrink_skip_inced) \
 	EXPAND_COUNTER(item_shrink_range) \
-	EXPAND_COUNTER(item_shrink)
+	EXPAND_COUNTER(item_shrink) \
+	EXPAND_COUNTER(lock_alloc) \
+	EXPAND_COUNTER(lock_free)
 
 #define FIRST_COUNTER alloc_alloc
-#define LAST_COUNTER item_shrink
+#define LAST_COUNTER lock_free
 
 #undef EXPAND_COUNTER
 #define EXPAND_COUNTER(which) struct percpu_counter which;
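
The FIRST_COUNTER/LAST_COUNTER pair suggests the counters struct is walked as a contiguous array of struct percpu_counter, which is why LAST_COUNTER has to move to lock_free when counters are appended to the end of the list. A sketch of such a walk follows; the struct name scoutfs_counters, the helper name, and the missing error unwinding are assumptions, not the scoutfs code.

#include <linux/percpu_counter.h>
#include <linux/gfp.h>

/* Sketch: initialize every field between FIRST_COUNTER and LAST_COUNTER by
 * treating the struct as an array of struct percpu_counter.  A counter added
 * after LAST_COUNTER would be silently skipped by a loop like this. */
static int init_all_counters(struct scoutfs_counters *counters)
{
	struct percpu_counter *pcpu = &counters->FIRST_COUNTER;
	struct percpu_counter *last = &counters->LAST_COUNTER;
	int ret;

	for (; pcpu <= last; pcpu++) {
		ret = percpu_counter_init(pcpu, 0, GFP_KERNEL);
		if (ret)
			return ret;	/* unwinding omitted in this sketch */
	}
	return 0;
}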

View File

@@ -103,6 +103,7 @@ static u8 item_flags(struct cached_item *item)
 static void free_item(struct super_block *sb, struct cached_item *item)
 {
 	if (!IS_ERR_OR_NULL(item)) {
+		scoutfs_inc_counter(sb, item_free);
 		WARN_ON_ONCE(!list_empty(&item->entry));
 		WARN_ON_ONCE(!RB_EMPTY_NODE(&item->node));
 		scoutfs_key_free(sb, item->key);
@@ -133,6 +134,9 @@ static struct cached_item *alloc_item(struct super_block *sb,
 		}
 	}
 
+	if (item)
+		scoutfs_inc_counter(sb, item_alloc);
+
 	return item;
 }
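
The item counters are bumped symmetrically: item_alloc only after the allocation is known to have succeeded, item_free only for pointers that were really handed out (hence the IS_ERR_OR_NULL guard), so item_alloc minus item_free approximates the number of live cached items. A minimal sketch of that pairing, with hypothetical names (widget, widget_alloc, widget_free are stand-ins, not scoutfs counters):

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/err.h>
#include "counters.h"

/* Hypothetical paired helpers; the point is that each counter is bumped on
 * exactly the paths its partner can later see. */
struct widget {
	int unused;
};

static struct widget *alloc_widget(struct super_block *sb)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_NOFS);

	if (w)
		scoutfs_inc_counter(sb, widget_alloc);	/* count successes only */
	return w;
}

static void free_widget(struct super_block *sb, struct widget *w)
{
	if (!IS_ERR_OR_NULL(w)) {
		scoutfs_inc_counter(sb, widget_free);	/* mirrors alloc exactly */
		kfree(w);
	}
}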
@@ -563,6 +567,7 @@ static bool check_range(struct super_block *sb, struct rb_root *root,
 static void free_range(struct super_block *sb, struct cached_range *rng)
 {
 	if (!IS_ERR_OR_NULL(rng)) {
+		scoutfs_inc_counter(sb, item_range_free);
 		scoutfs_key_free(sb, rng->start);
 		scoutfs_key_free(sb, rng->end);
 		kfree(rng);
@@ -1099,6 +1104,7 @@ int scoutfs_item_insert_batch(struct super_block *sb, struct list_head *list,
 	if (WARN_ON_ONCE(scoutfs_key_compare(start, end) > 0))
 		return -EINVAL;
 
+	scoutfs_inc_counter(sb, item_range_alloc);
 	rng = kzalloc(sizeof(struct cached_range), GFP_NOFS);
 	if (rng) {
 		rng->start = scoutfs_key_dup(sb, start);
@@ -1657,6 +1663,7 @@ int scoutfs_item_invalidate(struct super_block *sb,
 	/* XXX think about racing with trans write */
 
+	scoutfs_inc_counter(sb, item_range_alloc);
 	rng = kzalloc(sizeof(struct cached_range), GFP_NOFS);
 	if (rng) {
 		rng->start = scoutfs_key_alloc(sb, SCOUTFS_MAX_KEY_SIZE);

View File

@@ -27,6 +27,7 @@
 #include "dlmglue.h"
 #include "inode.h"
 #include "trans.h"
+#include "counters.h"
 
 #define LN_FMT "%u.%u.%u.%llu.%llu"
 #define LN_ARG(name) \
@@ -107,6 +108,7 @@ static void free_scoutfs_lock(struct scoutfs_lock *lock)
 	if (lock) {
 		linfo = SCOUTFS_SB(lock->sb)->lock_info;
 
+		scoutfs_inc_counter(lock->sb, lock_free);
 		ocfs2_lock_res_free(&lock->lockres);
 		scoutfs_key_free(lock->sb, lock->start);
 		scoutfs_key_free(lock->sb, lock->end);
@@ -294,6 +296,7 @@ search:
 		found->sequence = ++linfo->seq_cnt;
 		rb_link_node(&found->node, parent, node);
 		rb_insert_color(&found->node, &linfo->lock_tree);
+		scoutfs_inc_counter(sb, lock_alloc);
 	}
 	found->refcnt++;
 	if (test_bit(SCOUTFS_LOCK_RECLAIM, &found->flags)) {
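
With the pairs in place, leak checks are just a matter of comparing sums: if lock_alloc keeps pulling ahead of lock_free (or item_alloc ahead of item_free) after shrinkers have run, something is holding or leaking the structures. A sketch of the kind of debug check this enables, assuming the counters hang off the per-super info as SCOUTFS_SB(sb)->counters; the helper name and that field path are assumptions:

#include <linux/fs.h>
#include <linux/percpu_counter.h>
#include "counters.h"

/* Hypothetical unmount-time check: allocs and frees should have balanced. */
static void warn_if_lock_leak(struct super_block *sb)
{
	struct scoutfs_counters *counters = SCOUTFS_SB(sb)->counters;
	s64 live = percpu_counter_sum(&counters->lock_alloc) -
		   percpu_counter_sum(&counters->lock_free);

	WARN_ONCE(live != 0, "scoutfs: %lld locks still allocated\n", live);
}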