Mirror of https://github.com/versity/scoutfs.git (synced 2026-01-08 21:03:12 +00:00)

Compare commits: zab/block_ ... zab/invali (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a27c54568c | |
| | cbb031bb5d | |
| | c3290771a0 | |
| | cf3cb3f197 | |
| | cb4ed98b3c | |
@@ -286,10 +286,16 @@ static int block_insert(struct super_block *sb, struct block_private *bp)
 
 	WARN_ON_ONCE(atomic_read(&bp->refcount) & BLOCK_REF_INSERTED);
 
+retry:
 	atomic_add(BLOCK_REF_INSERTED, &bp->refcount);
-	ret = rhashtable_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
+	ret = rhashtable_lookup_insert_fast(&binf->ht, &bp->ht_head, block_ht_params);
 	if (ret < 0) {
 		atomic_sub(BLOCK_REF_INSERTED, &bp->refcount);
+		if (ret == -EBUSY) {
+			/* wait for pending rebalance to finish */
+			synchronize_rcu();
+			goto retry;
+		}
 	} else {
 		atomic_inc(&binf->total_inserted);
 		TRACE_BLOCK(insert, bp);
 
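The retry added above follows the reasoning in the hunk's own comment: an -EBUSY return means the hash table has a rebalance pending, so waiting out an RCU grace period with synchronize_rcu() before retrying gives the rehash a chance to complete. Below is a minimal kernel-style sketch of that pattern; my_entry, my_params, and my_insert_retry() are illustrative stand-ins for scoutfs's block_private, block_ht_params, and block_insert(), not code from the repository.

```c
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <linux/errno.h>

/* stand-in entry type; scoutfs hangs its rhash_head off block_private */
struct my_entry {
	u64 blkno;
	struct rhash_head ht_head;
};

static const struct rhashtable_params my_params = {
	.key_len = sizeof(u64),
	.key_offset = offsetof(struct my_entry, blkno),
	.head_offset = offsetof(struct my_entry, ht_head),
};

/*
 * Insert an entry, retrying while the table is mid-rebalance.  -EEXIST
 * and other errors are returned to the caller unchanged; -EBUSY waits
 * for a grace period so the pending rehash can finish, then retries.
 */
static int my_insert_retry(struct rhashtable *ht, struct my_entry *ent)
{
	int ret;

retry:
	ret = rhashtable_lookup_insert_fast(ht, &ent->ht_head, my_params);
	if (ret == -EBUSY) {
		synchronize_rcu();
		goto retry;
	}

	return ret;
}
```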
@@ -713,7 +713,9 @@ int scoutfs_lock_grant_response(struct super_block *sb,
 /*
  * Each lock has received a lock invalidation request from the server
  * which specifies a new mode for the lock.  The server will only send
- * one invalidation request at a time for each lock.
+ * one invalidation request at a time for each lock.  The server can
+ * send another invalidate request after we send the response but before
+ * we reacquire the lock and finish invalidation.
  *
  * This is an unsolicited request from the server so it can arrive at
  * any time after we make the server aware of the lock by initially
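The lock.c hunks below all revolve around a small amount of per-lock invalidation state. As a reading aid, the sketch collects the fields those hunks touch in one place; the struct name is hypothetical and the field types are inferred from how the diff uses them (list_add_tail()/list_empty(), the u64 net_id, the saved request), not copied from scoutfs's headers. The saved struct scoutfs_net_lock request (lock->inv_nl) is omitted to keep the sketch free of scoutfs types.

```c
#include <linux/list.h>
#include <linux/types.h>

/* reading aid only: not the real struct scoutfs_lock layout */
struct lock_inval_state {
	struct list_head inv_head;	/* linked on linfo->inv_list while queued */
	int invalidate_pending;		/* an invalidation is queued or in flight */
	u64 inv_net_id;			/* request id we still owe work for;
					 * cleared to 0 once the response is sent
					 * so a chained request can be recorded */
};
```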
@@ -803,6 +805,9 @@ static void lock_invalidate_worker(struct work_struct *work)
 		ret = lock_invalidate(sb, lock, nl->old_mode, nl->new_mode);
 		BUG_ON(ret);
 
+		/* allow another request after we respond but before we finish */
+		lock->inv_net_id = 0;
+
 		/* respond with the key and modes from the request */
 		ret = scoutfs_client_lock_response(sb, net_id, nl);
 		BUG_ON(ret);
@@ -814,11 +819,13 @@ static void lock_invalidate_worker(struct work_struct *work)
 	spin_lock(&linfo->lock);
 
 	list_for_each_entry_safe(lock, tmp, &ready, inv_head) {
-		list_del_init(&lock->inv_head);
-
-		lock->invalidate_pending = 0;
 		trace_scoutfs_lock_invalidated(sb, lock);
-		wake_up(&lock->waitq);
+		if (lock->inv_net_id == 0) {
+			/* finish if another request didn't arrive */
+			list_del_init(&lock->inv_head);
+			lock->invalidate_pending = 0;
+			wake_up(&lock->waitq);
+		}
 		put_lock(linfo, lock);
 	}
 
@@ -833,34 +840,47 @@ out:
 }
 
 /*
- * Record an incoming invalidate request from the server and add its lock
- * to the list for processing.
+ * Record an incoming invalidate request from the server and add its
+ * lock to the list for processing.  This request can be from a new
+ * server and racing with invalidation that frees from an old server.
+ * It's fine to not find the requested lock and send an immediate
+ * response.
  *
- * This is trusting the server and will crash if it's sent bad requests :/
+ * The invalidation process drops the linfo lock to send responses.  The
+ * moment it does so we can receive another invalidation request (the
+ * server can ask us to go from write->read then read->null).  We allow
+ * for one chain like this but it's a bug if we receive more concurrent
+ * invalidation requests than that.  The server should be only sending
+ * one at a time.
  */
 int scoutfs_lock_invalidate_request(struct super_block *sb, u64 net_id,
 				    struct scoutfs_net_lock *nl)
 {
 	DECLARE_LOCK_INFO(sb, linfo);
 	struct scoutfs_lock *lock;
+	int ret = 0;
 
 	scoutfs_inc_counter(sb, lock_invalidate_request);
 
 	spin_lock(&linfo->lock);
 	lock = get_lock(sb, &nl->key);
-	BUG_ON(!lock);
 	if (lock) {
-		BUG_ON(lock->invalidate_pending);
-		lock->invalidate_pending = 1;
-		lock->inv_nl = *nl;
+		BUG_ON(lock->inv_net_id != 0);
 		lock->inv_net_id = net_id;
-		list_add_tail(&lock->inv_head, &linfo->inv_list);
+		lock->inv_nl = *nl;
+		if (list_empty(&lock->inv_head)) {
+			list_add_tail(&lock->inv_head, &linfo->inv_list);
+			lock->invalidate_pending = 1;
+		}
 		trace_scoutfs_lock_invalidate_request(sb, lock);
 		queue_inv_work(linfo);
 	}
 	spin_unlock(&linfo->lock);
 
-	return 0;
+	if (!lock)
+		ret = scoutfs_client_lock_response(sb, net_id, nl);
+
+	return ret;
 }
 
 /*
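Read together, the lock.c hunks implement a small handshake: scoutfs_lock_invalidate_request() records the new net_id and only queues the lock if it isn't already being invalidated, the worker clears inv_net_id before sending its response, and the worker only tears the invalidation state down if no chained request arrived in the meantime. The standalone model below walks through just that bookkeeping with plain C stand-ins; model_lock, request(), responded(), and finish() are illustrative names, not scoutfs code, and the real locking, invalidation, and networking are left out.

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_lock {
	bool queued;		/* like invalidate_pending / being on inv_list */
	uint64_t inv_net_id;	/* request we still owe work for, 0 if none */
};

/* server sent an invalidate request: record it, queue only if idle */
static void request(struct model_lock *lk, uint64_t net_id)
{
	assert(lk->inv_net_id == 0);	/* server sends one at a time */
	lk->inv_net_id = net_id;
	if (!lk->queued)
		lk->queued = true;
}

/* worker sent the response: a chained request may now arrive */
static void responded(struct model_lock *lk)
{
	lk->inv_net_id = 0;
}

/* worker finished invalidating: only dequeue if nothing new arrived */
static void finish(struct model_lock *lk)
{
	if (lk->inv_net_id == 0)
		lk->queued = false;
}

int main(void)
{
	struct model_lock lk = { .queued = false, .inv_net_id = 0 };

	request(&lk, 1);	/* server: write -> read */
	responded(&lk);		/* worker: invalidated and responded */
	request(&lk, 2);	/* server: read -> null, races with finishing */
	finish(&lk);		/* stays queued: a chained request is pending */
	printf("after chained request: queued=%d inv_net_id=%llu\n",
	       lk.queued, (unsigned long long)lk.inv_net_id);

	responded(&lk);		/* worker: second pass handles the chain */
	finish(&lk);
	printf("after second pass:     queued=%d inv_net_id=%llu\n",
	       lk.queued, (unsigned long long)lk.inv_net_id);
	return 0;
}
```

Compiled and run, the model keeps the lock queued across the chained request and only drops it after the second pass, which is the behaviour the worker-loop hunk above is after.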