Allow read-mostly _alloc_meta_low

Acquiring each transaction hold makes multiple calls to _alloc_meta_low
to see if the open transaction should be committed to refill the
allocators before the caller's hold is acquired and it can dirty blocks
in the transaction.

_alloc_meta_low was using a spinlock to sample the allocator avail and
freed list_heads to determine whether space was available.  The lock
and unlock stores were creating significant cacheline contention.

The _alloc_meta_low calls happen at a significantly higher frequency
than allocations.  We can use a seqlock to keep writers exclusive while
allowing concurrent _alloc_meta_low readers that retry if a writer
intervenes.

Signed-off-by: Zach Brown <zab@versity.com>
Author: Zach Brown
Date:   2021-03-02 09:55:45 -08:00
Commit: c470c1c9f6 (parent e163f3b099)
2 changed files with 22 additions and 11 deletions
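For readers unfamiliar with the pattern, here is a minimal kernel-style sketch of the seqlock scheme the commit message describes, using a hypothetical `counts` struct in place of the scoutfs allocator list_heads. It only illustrates the generic <linux/seqlock.h> API, not scoutfs code: writers serialize through write_seqlock() while readers sample the fields without storing to the shared cacheline and retry if a write raced with them.

#include <linux/seqlock.h>
#include <linux/types.h>

/* hypothetical stand-in for the allocator avail/freed counts */
struct counts {
	seqlock_t seqlock;	/* set up with seqlock_init() before use */
	u32 avail_nr;
	u32 freed_nr;
};

/* writer side: exclusive, bumps the sequence so racing readers retry */
static void counts_update(struct counts *c, u32 avail_nr, u32 freed_nr)
{
	write_seqlock(&c->seqlock);
	c->avail_nr = avail_nr;
	c->freed_nr = freed_nr;
	write_sequnlock(&c->seqlock);
}

/* reader side: lockless sampling, no stores that bounce the cacheline */
static bool counts_low(struct counts *c, u32 nr)
{
	unsigned int seq;
	bool lo;

	do {
		seq = read_seqbegin(&c->seqlock);
		lo = c->avail_nr < nr || c->freed_nr < nr;
	} while (read_seqretry(&c->seqlock, seq));

	return lo;
}

In the diff below, scoutfs_alloc_meta() and scoutfs_free_meta() play the writer role and scoutfs_alloc_meta_low() is the lone reader.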


@@ -252,7 +252,7 @@ void scoutfs_alloc_init(struct scoutfs_alloc *alloc,
 {
 	memset(alloc, 0, sizeof(struct scoutfs_alloc));
-	spin_lock_init(&alloc->lock);
+	seqlock_init(&alloc->seqlock);
 	mutex_init(&alloc->mutex);
 	alloc->avail = *avail;
 	alloc->freed = *freed;
@@ -607,7 +607,8 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
 	if (ret < 0)
 		goto out;
 
-	spin_lock(&alloc->lock);
+	write_seqlock(&alloc->seqlock);
 	lblk = alloc->dirty_avail_bl->data;
 	if (WARN_ON_ONCE(lblk->nr == 0)) {
 		/* shouldn't happen, transaction should commit first */
@@ -617,7 +618,8 @@ int scoutfs_alloc_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
 		list_block_remove(&alloc->avail, lblk, 1);
 		ret = 0;
 	}
-	spin_unlock(&alloc->lock);
+	write_sequnlock(&alloc->seqlock);
 
 out:
 	if (ret < 0)
@@ -640,7 +642,8 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
 	if (ret < 0)
 		goto out;
 
-	spin_lock(&alloc->lock);
+	write_seqlock(&alloc->seqlock);
 	lblk = alloc->dirty_freed_bl->data;
 	if (WARN_ON_ONCE(list_block_space(lblk->nr) == 0)) {
 		/* shouldn't happen, transaction should commit first */
@@ -649,7 +652,8 @@ int scoutfs_free_meta(struct super_block *sb, struct scoutfs_alloc *alloc,
 		list_block_add(&alloc->freed, lblk, blkno);
 		ret = 0;
 	}
-	spin_unlock(&alloc->lock);
+	write_sequnlock(&alloc->seqlock);
 
 out:
 	scoutfs_inc_counter(sb, alloc_free_meta);
@@ -1147,17 +1151,23 @@ out:
 /*
  * Returns true if meta avail and free don't have room for the given
- * number of alloctions or frees.
+ * number of allocations or frees. This is called at a significantly
+ * higher frequency than allocations as writers try to enter
+ * transactions. This is the only reader of the seqlock which gives
+ * read-mostly sampling instead of bouncing a spinlock around all the
+ * cores.
  */
 bool scoutfs_alloc_meta_low(struct super_block *sb,
 			    struct scoutfs_alloc *alloc, u32 nr)
 {
+	unsigned int seq;
 	bool lo;
 
-	spin_lock(&alloc->lock);
-	lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
-	     list_block_space(alloc->freed.first_nr) < nr;
-	spin_unlock(&alloc->lock);
+	do {
+		seq = read_seqbegin(&alloc->seqlock);
+		lo = le32_to_cpu(alloc->avail.first_nr) < nr ||
+		     list_block_space(alloc->freed.first_nr) < nr;
+	} while (read_seqretry(&alloc->seqlock, seq));
 
 	return lo;
 }


@@ -72,7 +72,8 @@
  * transaction.
  */
struct scoutfs_alloc {
-	spinlock_t lock;
+	/* writers rarely modify list_head avail/freed, readers often check them in _alloc_meta_low */
+	seqlock_t seqlock;
 	struct mutex mutex;
 	struct scoutfs_block *dirty_avail_bl;
 	struct scoutfs_block *dirty_freed_bl;
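As a hedged sketch of the call pattern the new comment describes, a writer entering a transaction could sample scoutfs_alloc_meta_low() and force a commit when the allocators are low. Only scoutfs_alloc_meta_low() below is real; the helper name and the -ENOSPC return convention are hypothetical illustrations, not code from the scoutfs tree.

/*
 * Hypothetical caller sketch: check allocator headroom before taking a
 * transaction hold.  The read-mostly seqlock sampling inside
 * scoutfs_alloc_meta_low() means this hot path performs no spinlock
 * stores on the shared allocator cacheline.
 */
static int example_enter_trans(struct super_block *sb,
			       struct scoutfs_alloc *alloc, u32 nr)
{
	if (scoutfs_alloc_meta_low(sb, alloc, nr))
		return -ENOSPC;	/* caller commits the transaction and retries */

	return 0;
}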