Merge pull request #176 from versity/zab/accumulated_fixes

Zab/accumulated fixes
Zach Brown
2024-06-26 13:21:50 -07:00
committed by GitHub
10 changed files with 67 additions and 89 deletions


@@ -1807,37 +1807,6 @@ int scoutfs_data_wait_check_iov(struct inode *inode, const struct iovec *iov,
return ret;
}
int scoutfs_data_wait_check_iter(struct inode *inode, loff_t pos, struct iov_iter *iter,
u8 sef, u8 op, struct scoutfs_data_wait *dw,
struct scoutfs_lock *lock)
{
size_t count = iov_iter_count(iter);
size_t off = iter->iov_offset;
const struct iovec *iov;
size_t len;
int ret = 0;
for (iov = iter->iov; count > 0; iov++) {
len = iov->iov_len - off;
if (len == 0)
continue;
/* aren't we waiting on too much data here ? */
ret = scoutfs_data_wait_check(inode, pos, len,
sef, op, dw, lock);
if (ret != 0)
break;
pos += len;
count -= len;
off = 0;
}
return ret;
}
int scoutfs_data_wait(struct inode *inode, struct scoutfs_data_wait *dw)
{
DECLARE_DATA_WAIT_ROOT(inode->i_sb, rt);

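The hunk above removes scoutfs_data_wait_check_iter(), which walked the iovec array behind an iov_iter and issued a scoutfs_data_wait_check() call per segment; its callers in file.c (further down) switch to a single check over the iterator's remaining byte count. A rough userspace sketch of why one contiguous check covers everything the walk touched, with plain iovecs standing in for the kernel iov_iter and iov_total() as a hypothetical helper:

#include <stddef.h>
#include <sys/uio.h>

/*
 * The removed walk advanced pos by each segment length while counting the
 * same lengths down from the remaining total, so everything it checked sits
 * inside [pos, pos + iov_iter_count()), apart from a possible overshoot on
 * the last segment that its own comment questioned.  Summing the segments
 * shows the bound.
 */
static size_t iov_total(const struct iovec *iov, unsigned long nr_segs)
{
        size_t total = 0;
        unsigned long i;

        for (i = 0; i < nr_segs; i++)
                total += iov[i].iov_len;

        return total;   /* equals iov_iter_count() for a freshly set up iter */
}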

@@ -65,9 +65,6 @@ int scoutfs_data_wait_check_iov(struct inode *inode, const struct iovec *iov,
unsigned long nr_segs, loff_t pos, u8 sef,
u8 op, struct scoutfs_data_wait *ow,
struct scoutfs_lock *lock);
int scoutfs_data_wait_check_iter(struct inode *inode, loff_t pos, struct iov_iter *iter,
u8 sef, u8 op, struct scoutfs_data_wait *ow,
struct scoutfs_lock *lock);
bool scoutfs_data_wait_found(struct scoutfs_data_wait *ow);
int scoutfs_data_wait(struct inode *inode,
struct scoutfs_data_wait *ow);


@@ -931,7 +931,7 @@ static int scoutfs_unlink(struct inode *dir, struct dentry *dentry)
ret = lookup_dirent(sb, scoutfs_ino(dir), dentry->d_name.name, dentry->d_name.len, hash,
&dent, dir_lock);
if (ret < 0)
goto out;
goto unlock;
if (should_orphan(inode)) {
ret = scoutfs_lock_orphan(sb, SCOUTFS_LOCK_WRITE_ONLY, 0, scoutfs_ino(inode),

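The scoutfs_unlink() error path above now jumps to unlock rather than out when the dirent lookup fails, presumably so the locks taken earlier in the function are released on the way out. A generic sketch of that goto-unwind idiom, with every helper hypothetical:

static int take_locks(void);
static void release_locks(void);
static int do_lookup(void);
static int do_update(void);

static int locked_op(void)
{
        int ret;

        ret = take_locks();
        if (ret < 0)
                goto out;       /* nothing held yet, plain exit */

        ret = do_lookup();
        if (ret < 0)
                goto unlock;    /* lock held: must release before returning */

        ret = do_update();
unlock:
        release_locks();
out:
        return ret;
}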

@@ -171,10 +171,8 @@ retry:
goto out;
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
ret = scoutfs_data_wait_check_iter(inode, iocb->ki_pos, to,
SEF_OFFLINE,
SCOUTFS_IOC_DWO_READ,
&dw, scoutfs_inode_lock);
ret = scoutfs_data_wait_check(inode, iocb->ki_pos, iov_iter_count(to), SEF_OFFLINE,
SCOUTFS_IOC_DWO_READ, &dw, scoutfs_inode_lock);
if (ret != 0)
goto out;
} else {
@@ -205,8 +203,7 @@ ssize_t scoutfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct scoutfs_lock *scoutfs_inode_lock = NULL;
SCOUTFS_DECLARE_PER_TASK_ENTRY(pt_ent);
DECLARE_DATA_WAIT(dw);
int ret;
int written;
ssize_t ret;
retry:
inode_lock(inode);
@@ -225,17 +222,15 @@ retry:
if (scoutfs_per_task_add_excl(&si->pt_data_lock, &pt_ent, scoutfs_inode_lock)) {
/* data_version is per inode, whole file must be online */
ret = scoutfs_data_wait_check_iter(inode, iocb->ki_pos, from,
SEF_OFFLINE,
SCOUTFS_IOC_DWO_WRITE,
&dw, scoutfs_inode_lock);
ret = scoutfs_data_wait_check(inode, 0, i_size_read(inode), SEF_OFFLINE,
SCOUTFS_IOC_DWO_WRITE, &dw, scoutfs_inode_lock);
if (ret != 0)
goto out;
}
/* XXX: remove SUID bit */
written = __generic_file_write_iter(iocb, from);
ret = __generic_file_write_iter(iocb, from);
out:
scoutfs_per_task_del(&si->pt_data_lock, &pt_ent);
@@ -248,10 +243,10 @@ out:
goto retry;
}
if (ret > 0 || ret == -EIOCBQUEUED)
ret = generic_write_sync(iocb, written);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
return written ? written : ret;
return ret;
}
#endif

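The scoutfs_file_write_iter() changes above collapse the separate int ret / int written pair into a single ssize_t ret: the byte count no longer passes through an int, and a generic_write_sync() failure can no longer be hidden by returning the earlier written value. A compact sketch of the resulting shape, where do_write() and do_sync() are hypothetical stand-ins for __generic_file_write_iter() and generic_write_sync():

#include <sys/types.h>          /* size_t, ssize_t */

static ssize_t do_write(void *ctx, const void *buf, size_t len);   /* hypothetical */
static ssize_t do_sync(void *ctx, ssize_t written);                /* hypothetical */

static ssize_t write_and_sync(void *ctx, const void *buf, size_t len)
{
        ssize_t ret;

        ret = do_write(ctx, buf, len);          /* bytes written or -errno */
        if (ret > 0)
                ret = do_sync(ctx, ret);        /* sync failure replaces the count */

        return ret;
}

The read side makes the matching switch from scoutfs_data_wait_check_iter() to scoutfs_data_wait_check(), and the write side now checks the whole file (offset 0 through i_size) since data_version is per inode.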

@@ -4,6 +4,7 @@
struct scoutfs_alloc;
struct scoutfs_block_writer;
struct scoutfs_block;
struct scoutfs_lock;
#include "btree.h"

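The header gains a forward declaration of struct scoutfs_lock, which is all it needs to declare prototypes that take pointers to the type. A generic sketch of why an incomplete type suffices here; the widget names are illustrative, not scoutfs code:

struct widget;                          /* incomplete: size and members unknown */

int widget_poke(struct widget *w);      /* fine: only a pointer crosses the API */
/* int widget_copy(struct widget w);       would need the full definition */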

@@ -91,7 +91,7 @@ static void scoutfs_inode_ctor(void *obj)
init_rwsem(&si->extent_sem);
mutex_init(&si->item_mutex);
seqcount_init(&si->seqcount);
seqlock_init(&si->seqlock);
si->staging = false;
scoutfs_per_task_init(&si->pt_data_lock);
atomic64_set(&si->data_waitq.changed, 0);
@@ -566,11 +566,9 @@ static void set_trans_seq(struct inode *inode, u64 *seq)
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
if (*seq != sbi->trans_seq) {
preempt_disable();
write_seqcount_begin(&si->seqcount);
write_seqlock(&si->seqlock);
*seq = sbi->trans_seq;
write_seqcount_end(&si->seqcount);
preempt_enable();
write_sequnlock(&si->seqlock);
}
}
@@ -592,22 +590,18 @@ void scoutfs_inode_inc_data_version(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
preempt_disable();
write_seqcount_begin(&si->seqcount);
write_seqlock(&si->seqlock);
si->data_version++;
write_seqcount_end(&si->seqcount);
preempt_enable();
write_sequnlock(&si->seqlock);
}
void scoutfs_inode_set_data_version(struct inode *inode, u64 data_version)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
preempt_disable();
write_seqcount_begin(&si->seqcount);
write_seqlock(&si->seqlock);
si->data_version = data_version;
write_seqcount_end(&si->seqcount);
preempt_enable();
write_sequnlock(&si->seqlock);
}
void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)
@@ -616,8 +610,7 @@ void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)
if (inode && (on || off)) {
si = SCOUTFS_I(inode);
preempt_disable();
write_seqcount_begin(&si->seqcount);
write_seqlock(&si->seqlock);
/* inode and extents out of sync, bad callers */
if (((s64)si->online_blocks + on < 0) ||
@@ -638,8 +631,7 @@ void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)
si->online_blocks,
si->offline_blocks);
write_seqcount_end(&si->seqcount);
preempt_enable();
write_sequnlock(&si->seqlock);
}
/* any time offline extents decreased we try and wake waiters */
@@ -647,16 +639,16 @@ void scoutfs_inode_add_onoff(struct inode *inode, s64 on, s64 off)
scoutfs_data_wait_changed(inode);
}
static u64 read_seqcount_u64(struct inode *inode, u64 *val)
static u64 read_seqlock_u64(struct inode *inode, u64 *val)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
unsigned int seq;
unsigned seq;
u64 v;
do {
seq = read_seqcount_begin(&si->seqcount);
seq = read_seqbegin(&si->seqlock);
v = *val;
} while (read_seqcount_retry(&si->seqcount, seq));
} while (read_seqretry(&si->seqlock, seq));
return v;
}
@@ -665,33 +657,33 @@ u64 scoutfs_inode_meta_seq(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
return read_seqcount_u64(inode, &si->meta_seq);
return read_seqlock_u64(inode, &si->meta_seq);
}
u64 scoutfs_inode_data_seq(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
return read_seqcount_u64(inode, &si->data_seq);
return read_seqlock_u64(inode, &si->data_seq);
}
u64 scoutfs_inode_data_version(struct inode *inode)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
return read_seqcount_u64(inode, &si->data_version);
return read_seqlock_u64(inode, &si->data_version);
}
void scoutfs_inode_get_onoff(struct inode *inode, s64 *on, s64 *off)
{
struct scoutfs_inode_info *si = SCOUTFS_I(inode);
unsigned int seq;
unsigned seq;
do {
seq = read_seqcount_begin(&si->seqcount);
seq = read_seqbegin(&si->seqlock);
*on = SCOUTFS_I(inode)->online_blocks;
*off = SCOUTFS_I(inode)->offline_blocks;
} while (read_seqcount_retry(&si->seqcount, seq));
} while (read_seqretry(&si->seqlock, seq));
}
static int scoutfs_iget_test(struct inode *inode, void *arg)

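The inode.c hunks above (and the inode.h field below) convert the per-inode seqcount_t plus explicit preempt_disable()/preempt_enable() into a seqlock_t, whose write side takes an internal spinlock and therefore also serializes concurrent writers. A minimal kernel-style sketch of the read and write pattern the code moves to; the struct and field names are illustrative, not the scoutfs ones:

#include <linux/seqlock.h>
#include <linux/types.h>

struct sample {
        seqlock_t lock;
        u64 value;
};

static void sample_init(struct sample *s)
{
        seqlock_init(&s->lock);         /* once per object, as in the slab ctor */
}

static void sample_set(struct sample *s, u64 v)
{
        write_seqlock(&s->lock);        /* spinlock plus sequence bump */
        s->value = v;
        write_sequnlock(&s->lock);
}

static u64 sample_get(struct sample *s)
{
        unsigned seq;
        u64 v;

        do {
                seq = read_seqbegin(&s->lock);
                v = s->value;
        } while (read_seqretry(&s->lock, seq));

        return v;
}

read_seqbegin()/read_seqretry() keep the same lock-free retry loop the readers already used, so only the write side changes shape.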

@@ -47,7 +47,7 @@ struct scoutfs_inode_info {
atomic64_t last_refreshed;
/* initialized once for slab object */
seqcount_t seqcount;
seqlock_t seqlock;
bool staging; /* holder of i_mutex is staging */
struct scoutfs_per_task pt_data_lock;
struct scoutfs_data_waitq data_waitq;


@@ -24,6 +24,7 @@
#include "item.h"
#include "forest.h"
#include "block.h"
#include "msg.h"
#include "trans.h"
#include "counters.h"
#include "scoutfs_trace.h"
@@ -1670,13 +1671,24 @@ out:
return ret;
}
static int lock_safe(struct scoutfs_lock *lock, struct scoutfs_key *key,
static int lock_safe(struct super_block *sb, struct scoutfs_lock *lock, struct scoutfs_key *key,
int mode)
{
if (WARN_ON_ONCE(!scoutfs_lock_protected(lock, key, mode)))
bool prot = scoutfs_lock_protected(lock, key, mode);
if (!prot) {
static bool once = false;
if (!once) {
scoutfs_err(sb, "lock (start "SK_FMT" end "SK_FMT" mode 0x%x) does not protect operation (key "SK_FMT" mode 0x%x)",
SK_ARG(&lock->start), SK_ARG(&lock->end), lock->mode,
SK_ARG(key), mode);
dump_stack();
once = true;
}
return -EINVAL;
else
return 0;
}
return 0;
}
static int optional_lock_mode_match(struct scoutfs_lock *lock, int mode)
@@ -1718,7 +1730,7 @@ int scoutfs_item_lookup(struct super_block *sb, struct scoutfs_key *key,
scoutfs_inc_counter(sb, item_lookup);
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_READ)))
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_READ)))
goto out;
ret = get_cached_page(sb, cinf, lock, key, false, false, 0, &pg);
@@ -1793,7 +1805,7 @@ int scoutfs_item_next(struct super_block *sb, struct scoutfs_key *key,
goto out;
}
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_READ)))
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_READ)))
goto out;
pos = *key;
@@ -1874,7 +1886,7 @@ int scoutfs_item_dirty(struct super_block *sb, struct scoutfs_key *key,
scoutfs_inc_counter(sb, item_dirty);
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE)))
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE)))
goto out;
ret = scoutfs_forest_set_bloom_bits(sb, lock);
@@ -1920,7 +1932,7 @@ static int item_create(struct super_block *sb, struct scoutfs_key *key,
scoutfs_inc_counter(sb, item_create);
if ((ret = lock_safe(lock, key, mode)) ||
if ((ret = lock_safe(sb, lock, key, mode)) ||
(ret = optional_lock_mode_match(primary, SCOUTFS_LOCK_WRITE)))
goto out;
@@ -1963,7 +1975,7 @@ int scoutfs_item_create(struct super_block *sb, struct scoutfs_key *key,
void *val, int val_len, struct scoutfs_lock *lock)
{
return item_create(sb, key, val, val_len, lock, NULL,
SCOUTFS_LOCK_READ, false);
SCOUTFS_LOCK_WRITE, false);
}
int scoutfs_item_create_force(struct super_block *sb, struct scoutfs_key *key,
@@ -1994,7 +2006,7 @@ int scoutfs_item_update(struct super_block *sb, struct scoutfs_key *key,
scoutfs_inc_counter(sb, item_update);
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE)))
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE)))
goto out;
ret = scoutfs_forest_set_bloom_bits(sb, lock);
@@ -2062,7 +2074,7 @@ int scoutfs_item_delta(struct super_block *sb, struct scoutfs_key *key,
scoutfs_inc_counter(sb, item_delta);
if ((ret = lock_safe(lock, key, SCOUTFS_LOCK_WRITE_ONLY)))
if ((ret = lock_safe(sb, lock, key, SCOUTFS_LOCK_WRITE_ONLY)))
goto out;
ret = scoutfs_forest_set_bloom_bits(sb, lock);
@@ -2135,7 +2147,7 @@ static int item_delete(struct super_block *sb, struct scoutfs_key *key,
scoutfs_inc_counter(sb, item_delete);
if ((ret = lock_safe(lock, key, mode)) ||
if ((ret = lock_safe(sb, lock, key, mode)) ||
(ret = optional_lock_mode_match(primary, SCOUTFS_LOCK_WRITE)))
goto out;

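The reworked lock_safe() above replaces WARN_ON_ONCE() with an explicit scoutfs_err() naming the lock range, key and modes, plus a dump_stack(), gated by a static flag so only the first violation produces the verbose report while every caller still gets -EINVAL. A small userspace sketch of that report-once shape; check_protected() is a hypothetical stand-in, not the scoutfs function:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int check_protected(bool prot)
{
        static bool once;

        if (!prot) {
                if (!once) {
                        once = true;    /* unsynchronized check-then-set, as in
                                           the diff: a rare duplicate report is
                                           tolerated */
                        fprintf(stderr, "operation not protected by lock\n");
                }
                return -EINVAL;
        }

        return 0;
}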

@@ -119,6 +119,16 @@ static int do_change_fmt_vers(struct change_fmt_vers_args *args)
goto out;
}
if (le64_to_cpu(meta_super->fmt_vers) > args->fmt_vers ||
le64_to_cpu(data_super->fmt_vers) > args->fmt_vers) {
ret = -EPERM;
printf("Downgrade of Meta Format Version: %llu and Data Format Version: %llu to Format Version: %llu is not allowed\n",
le64_to_cpu(meta_super->fmt_vers),
le64_to_cpu(data_super->fmt_vers),
args->fmt_vers);
goto out;
}
if (le64_to_cpu(meta_super->fmt_vers) != args->fmt_vers) {
meta_super->fmt_vers = cpu_to_le64(args->fmt_vers);

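The added block above refuses to move either super block to an older format version; equal or newer requests fall through to the update that follows it. A condensed userspace sketch of the rule, with check_fmt_vers_request() as a hypothetical helper rather than the utility's code:

#include <errno.h>
#include <stdint.h>

static int check_fmt_vers_request(uint64_t meta_vers, uint64_t data_vers,
                                  uint64_t requested)
{
        if (meta_vers > requested || data_vers > requested)
                return -EPERM;          /* downgrade is not allowed */

        return 0;                       /* no-op or upgrade */
}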

@@ -274,6 +274,8 @@ static int do_mkfs(struct mkfs_args *args)
inode.ctime.nsec = inode.atime.nsec;
inode.mtime.sec = inode.atime.sec;
inode.mtime.nsec = inode.atime.nsec;
inode.crtime.sec = inode.atime.sec;
inode.crtime.nsec = inode.atime.nsec;
btree_append_item(bt, &key, &inode, sizeof(inode));
ret = write_block(meta_fd, SCOUTFS_BLOCK_MAGIC_BTREE, fsid, 1, blkno,