scoutfs: add flag to refresh inode after locking

Lock callers can specify that they want inode fields reread from items
after the lock is acquired.  dlmglue sets a refresh_gen in each lock; we
store it in inodes to track when they were last refreshed and whether
they still need a refresh.

Signed-off-by: Zach Brown <zab@versity.com>
Author: Zach Brown
Date:   2017-08-23 15:49:25 -07:00
Parent: d2a1b915fc
Commit: fdbe0de8e9

5 changed files with 88 additions and 12 deletions
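
For orientation, a minimal caller sketch (not part of this commit) of the
new flag: only scoutfs_lock_inode(), scoutfs_unlock(), and
SCOUTFS_LKF_REFRESH_INODE come from the patch below; the function name and
the DLM_LOCK_PR mode are illustrative stand-ins for whatever mode a real
caller already passes.

	/* hypothetical read path that wants current inode fields */
	static int example_read_isize(struct super_block *sb,
				      struct inode *inode, u64 *size)
	{
		struct scoutfs_lock *lock = NULL;
		int ret;

		/* acquire the lock and refresh stale vfs fields from items */
		ret = scoutfs_lock_inode(sb, DLM_LOCK_PR,
					 SCOUTFS_LKF_REFRESH_INODE, inode,
					 &lock);
		if (ret)
			return ret;

		/* fields are now current with respect to this lock */
		*size = i_size_read(inode);

		scoutfs_unlock(sb, lock, DLM_LOCK_PR);
		return 0;
	}

Note that when the refresh itself fails, scoutfs_lock_inode() drops the
lock and NULLs *lock before returning the error, so callers only unlock on
success.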

@@ -509,7 +509,7 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
 	if (ret)
 		goto out_unlock;
 
-	inode = scoutfs_new_inode(sb, dir, mode, rdev);
+	inode = scoutfs_new_inode(sb, dir, mode, rdev, dir_lock);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
 		goto out;
@@ -881,7 +881,7 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
 	if (ret)
 		goto out_unlock;
 
-	inode = scoutfs_new_inode(sb, dir, S_IFLNK|S_IRWXUGO, 0);
+	inode = scoutfs_new_inode(sb, dir, S_IFLNK|S_IRWXUGO, 0, dir_lock);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
 		goto out;

@@ -235,7 +235,19 @@ static void load_inode(struct inode *inode, struct scoutfs_inode *cinode)
 	set_item_info(ci, cinode);
 }
 
-static int refresh_inode(struct inode *inode, struct scoutfs_lock *lock)
+/*
+ * Refresh the vfs inode fields if the lock indicates that the current
+ * contents could be stale.
+ *
+ * This can be racing with many lock holders of an inode.  A bunch of
+ * readers can be checking to refresh while one of them is refreshing.
+ *
+ * The vfs inode field updates can't be racing with valid readers of the
+ * fields because they should have already had a locked refreshed inode
+ * to be dereferencing its contents.
+ */
+int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
+			  int flags)
 {
 	struct scoutfs_inode_info *si = SCOUTFS_I(inode);
 	struct super_block *sb = inode->i_sb;
@@ -243,15 +255,34 @@ static int refresh_inode(struct inode *inode, struct scoutfs_lock *lock)
 	struct scoutfs_inode_key ikey;
 	struct scoutfs_inode sinode;
 	SCOUTFS_DECLARE_KVEC(val);
+	const u64 refresh_gen = scoutfs_lock_refresh_gen(lock);
 	int ret;
 
+	/*
+	 * Lock refresh gens are supposed to strictly increase.  Inodes
+	 * having a greater gen means memory corruption or
+	 * lifetime/logic bugs that could stop the inode from refreshing
+	 * and expose stale data.
+	 */
+	BUG_ON(atomic64_read(&si->last_refreshed) > refresh_gen);
+
+	if (atomic64_read(&si->last_refreshed) == refresh_gen)
+		return 0;
+
 	scoutfs_inode_init_key(&key, &ikey, scoutfs_ino(inode));
 	scoutfs_kvec_init(val, &sinode, sizeof(sinode));
 
 	mutex_lock(&si->item_mutex);
-	ret = scoutfs_item_lookup_exact(sb, &key, val, sizeof(sinode), lock->end);
-	if (ret == 0)
-		load_inode(inode, &sinode);
+	if (atomic64_read(&si->last_refreshed) < refresh_gen) {
+		ret = scoutfs_item_lookup_exact(sb, &key, val, sizeof(sinode),
+						lock->end);
+		if (ret == 0) {
+			load_inode(inode, &sinode);
+			atomic64_set(&si->last_refreshed, refresh_gen);
+		}
+	} else {
+		ret = 0;
+	}
 	mutex_unlock(&si->item_mutex);
 
 	return ret;
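
The function above is a double-checked refresh: an unlocked generation test
for the common already-refreshed case, then a re-check under item_mutex so
that only one of many racing lock holders reloads the inode for a given
refresh generation.  A standalone model of just that pattern (plain
userspace pthreads/stdatomic code, not scoutfs's) behaves the same way:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_ullong last_refreshed;
	static pthread_mutex_t item_mutex = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long long loads;	/* stands in for item lookup + load_inode() */

	static void refresh(unsigned long long refresh_gen)
	{
		/* fast path: this lock generation was already refreshed */
		if (atomic_load(&last_refreshed) == refresh_gen)
			return;

		pthread_mutex_lock(&item_mutex);
		/* re-check: a racing holder may have refreshed while we waited */
		if (atomic_load(&last_refreshed) < refresh_gen) {
			loads++;
			atomic_store(&last_refreshed, refresh_gen);
		}
		pthread_mutex_unlock(&item_mutex);
	}

	int main(void)
	{
		/* many holders of the same lock generation trigger one reload */
		for (int i = 0; i < 10; i++)
			refresh(1);
		refresh(2);
		printf("loads = %llu\n", loads);	/* prints 2 */
		return 0;
	}
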
@@ -279,7 +310,7 @@ static int scoutfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
 	if (ret)
 		return ret;
 
-	ret = refresh_inode(inode, lock);
+	ret = scoutfs_inode_refresh(inode, lock, 0);
 	if (ret == 0)
 		generic_fillattr(inode, stat);
@@ -404,7 +435,10 @@ struct inode *scoutfs_iget(struct super_block *sb, u64 ino)
 	}
 
 	if (inode->i_state & I_NEW) {
-		ret = refresh_inode(inode, lock);
+		/* XXX ensure refresh, instead clear in drop_inode? */
+		atomic64_set(&SCOUTFS_I(inode)->last_refreshed, 0);
+
+		ret = scoutfs_inode_refresh(inode, lock, 0);
 		if (ret) {
 			iget_failed(inode);
 			inode = ERR_PTR(ret);
@@ -771,7 +805,8 @@ out:
  * creating links to it and updating it.  @dir can be null.
  */
 struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
-				umode_t mode, dev_t rdev)
+				umode_t mode, dev_t rdev,
+				struct scoutfs_lock *lock)
 {
 	struct scoutfs_inode_info *ci;
 	struct scoutfs_inode_key ikey;
@@ -797,6 +832,7 @@ struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
 	ci->data_version = 0;
 	ci->next_readdir_pos = SCOUTFS_DIRENT_FIRST_POS;
 	ci->have_item = false;
+	atomic64_set(&ci->last_refreshed, scoutfs_lock_refresh_gen(lock));
 
 	inode->i_ino = ino;	/* XXX overflow */
 	inode_init_owner(inode, dir, mode);

@@ -3,6 +3,8 @@
 
 #include "key.h"
 
+struct scoutfs_lock;
+
 struct scoutfs_inode_info {
 	/* read or initialized for each inode instance */
 	u64 ino;
@@ -25,6 +27,9 @@ struct scoutfs_inode_info {
 	u64 item_meta_seq;
 	u64 item_data_seq;
 
+	/* updated on each new lock acquisition */
+	atomic64_t last_refreshed;
+
 	/* initialized once for slab object */
 	seqcount_t seqcount;
 	bool staging;		/* holder of i_mutex is staging */
@@ -58,7 +63,8 @@ int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_key_buf *end);
 void scoutfs_update_inode_item(struct inode *inode);
 void scoutfs_inode_fill_pool(struct super_block *sb, u64 ino, u64 nr);
 struct inode *scoutfs_new_inode(struct super_block *sb, struct inode *dir,
-				umode_t mode, dev_t rdev);
+				umode_t mode, dev_t rdev,
+				struct scoutfs_lock *lock);
 void scoutfs_inode_set_meta_seq(struct inode *inode);
 void scoutfs_inode_set_data_seq(struct inode *inode);
 void scoutfs_inode_inc_data_version(struct inode *inode);
@@ -66,6 +72,9 @@ u64 scoutfs_inode_meta_seq(struct inode *inode);
 u64 scoutfs_inode_data_seq(struct inode *inode);
 u64 scoutfs_inode_data_version(struct inode *inode);
 
+int scoutfs_inode_refresh(struct inode *inode, struct scoutfs_lock *lock,
+			  int flags);
+
 int scoutfs_scan_orphans(struct super_block *sb);
 
 void scoutfs_inode_queue_writeback(struct inode *inode);

@@ -479,6 +479,11 @@ out:
 #endif
 }
 
+u64 scoutfs_lock_refresh_gen(struct scoutfs_lock *lock)
+{
+	return ocfs2_lock_refresh_gen(&lock->lockres);
+}
+
 int scoutfs_lock_ino(struct super_block *sb, int mode, int flags, u64 ino,
 		     struct scoutfs_lock **ret_lock)
 {
@@ -509,10 +514,33 @@ int scoutfs_lock_ino(struct super_block *sb, int mode, int flags, u64 ino,
 			   &end, ret_lock);
 }
 
+/*
+ * Acquire a lock on an inode.
+ *
+ * _REFRESH_INODE indicates that the caller needs to have the vfs inode
+ * fields current with respect to lock coverage.  dlmglue increases the
+ * lock's refresh_gen once every time its mode is changed from a mode
+ * that couldn't have the inode cached to one that could.
+ */
 int scoutfs_lock_inode(struct super_block *sb, int mode, int flags,
-		       struct inode *inode, struct scoutfs_lock **ret_lock)
+		       struct inode *inode, struct scoutfs_lock **lock)
 {
-	return scoutfs_lock_ino(sb, mode, flags, scoutfs_ino(inode), ret_lock);
+	int ret;
+
+	ret = scoutfs_lock_ino(sb, mode, flags, scoutfs_ino(inode), lock);
+	if (ret < 0)
+		goto out;
+
+	if (flags & SCOUTFS_LKF_REFRESH_INODE) {
+		ret = scoutfs_inode_refresh(inode, *lock, flags);
+		if (ret < 0) {
+			scoutfs_unlock(sb, *lock, mode);
+			*lock = NULL;
+		}
+	}
+
+out:
+	return ret;
 }
 
 /*

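The comment above relies on dlmglue's rule that a lock's refresh_gen only
moves forward, and only when the lock goes from a mode that can't keep the
inode cached to one that can.  A toy model of that rule (the modes and the
can_cache() test are illustrative stand-ins, not dlmglue code):

	#include <stdbool.h>
	#include <stdio.h>

	enum mode { MODE_NL, MODE_PR, MODE_EX };	/* stand-ins for DLM modes */

	struct lockres {
		enum mode mode;
		unsigned long long refresh_gen;
	};

	static bool can_cache(enum mode mode)
	{
		return mode != MODE_NL;		/* NL can't keep the inode cached */
	}

	static void set_mode(struct lockres *res, enum mode mode)
	{
		/* another node may have changed the inode while we couldn't cache it */
		if (!can_cache(res->mode) && can_cache(mode))
			res->refresh_gen++;
		res->mode = mode;
	}

	int main(void)
	{
		struct lockres res = { .mode = MODE_NL };

		set_mode(&res, MODE_PR);	/* gen 1: first chance to cache */
		set_mode(&res, MODE_EX);	/* still cacheable, gen stays 1 */
		set_mode(&res, MODE_NL);	/* dropped, cache can go stale */
		set_mode(&res, MODE_PR);	/* gen 2: inode must be refreshed */
		printf("refresh_gen = %llu\n", res.refresh_gen);
		return 0;
	}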

@@ -8,6 +8,8 @@
 #define SCOUTFS_LOCK_BLOCKING	0x01 /* Blocking another lock request */
 #define SCOUTFS_LOCK_QUEUED	0x02 /* Put on drop workqueue */
 
+#define SCOUTFS_LKF_REFRESH_INODE 0x01 /* update stale inode from item */
+
 struct scoutfs_lock {
 	struct super_block *sb;
 	struct scoutfs_lock_name lock_name;
@@ -26,6 +28,7 @@ struct scoutfs_lock {
 	struct ocfs2_lock_res lockres;
 };
 
+u64 scoutfs_lock_refresh_gen(struct scoutfs_lock *lock);
 int scoutfs_lock_inode(struct super_block *sb, int mode, int flags,
 		       struct inode *inode, struct scoutfs_lock **ret_lock);
 int scoutfs_lock_ino(struct super_block *sb, int mode, int flags, u64 ino,