Remove item accounting

Remove kmod/src/count.h
Remove scoutfs_trans_track_item()
Remove reserved/actual fields from scoutfs_reservation

Signed-off-by: Andy Grover <agrover@versity.com>
Author: Andy Grover
Date: 2021-01-16 13:24:12 -08:00
parent b370730029
commit bed33c7ffd
10 changed files with 44 additions and 528 deletions
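The net effect is a simpler transaction-hold API. As a summary sketch, assembled from the hunks below (signatures only, not a compilable excerpt):

    /* before: callers passed an item count estimate */
    int scoutfs_hold_trans(struct super_block *sb,
                           const struct scoutfs_item_count cnt);
    int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
                                      bool set_data_seq,
                                      const struct scoutfs_item_count cnt);

    /* after: holding a transaction takes no estimate */
    int scoutfs_hold_trans(struct super_block *sb);
    int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
                                      bool set_data_seq);

Call sites drop their SIC_*() argument to match, e.g. scoutfs_hold_trans(sb, SIC_EXACT(2, 0)) in the xattr teardown path becomes scoutfs_hold_trans(sb).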


@@ -1,315 +0,0 @@
#ifndef _SCOUTFS_COUNT_H_
#define _SCOUTFS_COUNT_H_
/*
* Our estimate of the space consumed while dirtying items is based on
* the number of items and the size of their values.
*
* The estimate is a read-only input to entering the transaction.
* We'd like to use it as a clean rhs arg to hold_trans. We define SIC_
* functions which return the count struct. This lets us have a single
* arg and avoid bugs in initializing and passing in struct pointers
* from callers. The internal __count functions are used to compose an
* estimate out of the sets of items an operation manipulates. We
* program in much clearer C instead of in the preprocessor.
*
* Compilers are able to collapse the inlines into constants for the
* constant estimates.
*/
struct scoutfs_item_count {
signed items;
signed vals;
};
/* The caller knows exactly what they're doing. */
static inline const struct scoutfs_item_count SIC_EXACT(signed items,
signed vals)
{
struct scoutfs_item_count cnt = {
.items = items,
.vals = vals,
};
return cnt;
}
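/*
 * Usage sketch, mirroring the xattr teardown call site removed later
 * in this commit: returning the struct by value lets the estimate
 * ride along as a single argument.
 *
 *	ret = scoutfs_hold_trans(sb, SIC_EXACT(2, 0));
 *	if (ret < 0)
 *		return ret;
 *	...dirty at most 2 items with no value bytes...
 *	scoutfs_release_trans(sb);
 */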
/*
* Allocating an inode creates a new set of indexed items.
*/
static inline void __count_alloc_inode(struct scoutfs_item_count *cnt)
{
const int nr_indices = SCOUTFS_INODE_INDEX_NR;
cnt->items += 1 + nr_indices;
cnt->vals += sizeof(struct scoutfs_inode);
}
/*
* Dirtying an inode dirties the inode item and can delete and create
* the full set of indexed items.
*/
static inline void __count_dirty_inode(struct scoutfs_item_count *cnt)
{
const int nr_indices = 2 * SCOUTFS_INODE_INDEX_NR;
cnt->items += 1 + nr_indices;
cnt->vals += sizeof(struct scoutfs_inode);
}
static inline const struct scoutfs_item_count SIC_ALLOC_INODE(void)
{
struct scoutfs_item_count cnt = {0,};
__count_alloc_inode(&cnt);
return cnt;
}
static inline const struct scoutfs_item_count SIC_DIRTY_INODE(void)
{
struct scoutfs_item_count cnt = {0,};
__count_dirty_inode(&cnt);
return cnt;
}
/*
* Directory entries are stored in three items.
*/
static inline void __count_dirents(struct scoutfs_item_count *cnt,
unsigned name_len)
{
cnt->items += 3;
cnt->vals += 3 * offsetof(struct scoutfs_dirent, name[name_len]);
}
static inline void __count_sym_target(struct scoutfs_item_count *cnt,
unsigned size)
{
unsigned nr = DIV_ROUND_UP(size, SCOUTFS_MAX_VAL_SIZE);
cnt->items += nr;
cnt->vals += size;
}
static inline void __count_orphan(struct scoutfs_item_count *cnt)
{
cnt->items += 1;
}
static inline void __count_mknod(struct scoutfs_item_count *cnt,
unsigned name_len)
{
__count_alloc_inode(cnt);
__count_dirents(cnt, name_len);
__count_dirty_inode(cnt);
}
static inline const struct scoutfs_item_count SIC_MKNOD(unsigned name_len)
{
struct scoutfs_item_count cnt = {0,};
__count_mknod(&cnt, name_len);
return cnt;
}
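/*
 * Worked expansion of the mknod estimate, derived from the helpers
 * above, for a name of length n:
 *
 *	items = (1 + SCOUTFS_INODE_INDEX_NR)		alloc_inode
 *	      + 3					dirents
 *	      + (1 + 2 * SCOUTFS_INODE_INDEX_NR)	dirty_inode (dir)
 *	      = 5 + 3 * SCOUTFS_INODE_INDEX_NR
 *
 *	vals  = 2 * sizeof(struct scoutfs_inode)
 *	      + 3 * offsetof(struct scoutfs_dirent, name[n])
 */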
/*
* Dropping the inode deletes all its items. Potentially enormous numbers
* of items (data mapping, xattrs) are deleted in their own transactions.
*/
static inline const struct scoutfs_item_count SIC_DROP_INODE(int mode,
u64 size)
{
struct scoutfs_item_count cnt = {0,};
if (S_ISLNK(mode))
__count_sym_target(&cnt, size);
__count_dirty_inode(&cnt);
__count_orphan(&cnt);
cnt.vals = 0;
return cnt;
}
static inline const struct scoutfs_item_count SIC_LINK(unsigned name_len)
{
struct scoutfs_item_count cnt = {0,};
__count_dirents(&cnt, name_len);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
return cnt;
}
/*
* Unlink can add orphan items.
*/
static inline const struct scoutfs_item_count SIC_UNLINK(unsigned name_len)
{
struct scoutfs_item_count cnt = {0,};
__count_dirents(&cnt, name_len);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
__count_orphan(&cnt);
return cnt;
}
static inline const struct scoutfs_item_count SIC_SYMLINK(unsigned name_len,
unsigned size)
{
struct scoutfs_item_count cnt = {0,};
__count_mknod(&cnt, name_len);
__count_sym_target(&cnt, size);
return cnt;
}
/*
* This assumes the worst case of a rename between directories that
* unlinks an existing target. That'll be worse than the common case
* by a few hundred bytes.
*/
static inline const struct scoutfs_item_count SIC_RENAME(unsigned old_len,
unsigned new_len)
{
struct scoutfs_item_count cnt = {0,};
/* dirty dirs and inodes */
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
__count_dirty_inode(&cnt);
/* unlink old and new, link new */
__count_dirents(&cnt, old_len);
__count_dirents(&cnt, new_len);
__count_dirents(&cnt, new_len);
/* orphan the existing target */
__count_orphan(&cnt);
return cnt;
}
/*
* Creating an xattr results in a dirty set of items with values that
* store the xattr header, name, and value. There's always at least one
* item with the header and name. Any previously existing items are
* deleted, which dirties their keys but removes their values. The two
* sets of items are indexed by different ids so they don't overlap.
*/
static inline const struct scoutfs_item_count SIC_XATTR_SET(unsigned old_parts,
bool creating,
unsigned name_len,
unsigned size)
{
struct scoutfs_item_count cnt = {0,};
unsigned int new_parts;
__count_dirty_inode(&cnt);
if (old_parts)
cnt.items += old_parts;
if (creating) {
new_parts = SCOUTFS_XATTR_NR_PARTS(name_len, size);
cnt.items += new_parts;
cnt.vals += sizeof(struct scoutfs_xattr) + name_len + size;
}
return cnt;
}
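/*
 * Decomposing the estimate above (a derivation, not code from the
 * original): old items only dirty keys, so they add no value bytes.
 *
 *	items = (1 + 2 * SCOUTFS_INODE_INDEX_NR)	  dirty inode
 *	      + old_parts				  deleted old items
 *	      + SCOUTFS_XATTR_NR_PARTS(name_len, size)	  if creating
 *
 *	vals  = sizeof(struct scoutfs_inode)
 *	      + sizeof(struct scoutfs_xattr) + name_len + size	  if creating
 */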
/*
* write_begin may need to allocate all the blocks in the page and may
* need to add a big allocation from the server to do so:
* - merge added free extents from the server
* - remove a free extent per block
* - remove an offline extent for every other block
* - add a file extent per block
*/
static inline const struct scoutfs_item_count SIC_WRITE_BEGIN(void)
{
struct scoutfs_item_count cnt = {0,};
unsigned nr_free = (1 + SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
unsigned nr_file = (DIV_ROUND_UP(SCOUTFS_BLOCK_SM_PER_PAGE, 2) +
SCOUTFS_BLOCK_SM_PER_PAGE) * 3;
__count_dirty_inode(&cnt);
cnt.items += nr_free + nr_file;
cnt.vals += nr_file;
return cnt;
}
/*
* Truncating an extent can:
* - delete existing file extent,
* - create two surrounding file extents,
* - add an offline file extent,
* - delete two existing free extents
* - create a merged free extent
*/
static inline const struct scoutfs_item_count
SIC_TRUNC_EXTENT(struct inode *inode)
{
struct scoutfs_item_count cnt = {0,};
unsigned int nr_file = 1 + 2 + 1;
unsigned int nr_free = (2 + 1) * 2;
if (inode)
__count_dirty_inode(&cnt);
cnt.items += nr_file + nr_free;
cnt.vals += nr_file;
return cnt;
}
/*
* Fallocating an extent can, at most:
* - allocate from the server: delete two free and insert merged
* - free an allocated extent: delete one and create two split
* - remove an unallocated file extent: delete one and create two split
* - add a fallocated file extent: delete two and insert one merged
*/
static inline const struct scoutfs_item_count SIC_FALLOCATE_ONE(void)
{
struct scoutfs_item_count cnt = {0,};
unsigned int nr_free = ((1 + 2) * 2) * 2;
unsigned int nr_file = (1 + 2) * 2;
__count_dirty_inode(&cnt);
cnt.items += nr_free + nr_file;
cnt.vals += nr_file;
return cnt;
}
/*
* ioc_setattr_more can dirty the inode and add a single offline extent.
*/
static inline const struct scoutfs_item_count SIC_SETATTR_MORE(void)
{
struct scoutfs_item_count cnt = {0,};
__count_dirty_inode(&cnt);
cnt.items++;
return cnt;
}
#endif


@@ -37,7 +37,6 @@
#include "lock.h"
#include "file.h"
#include "msg.h"
#include "count.h"
#include "ext.h"
#include "util.h"
@@ -291,7 +290,6 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
u64 ino, u64 iblock, u64 last, bool offline,
struct scoutfs_lock *lock)
{
struct scoutfs_item_count cnt = SIC_TRUNC_EXTENT(inode);
struct scoutfs_inode_info *si = NULL;
LIST_HEAD(ind_locks);
s64 ret = 0;
@@ -315,9 +313,9 @@ int scoutfs_data_truncate_items(struct super_block *sb, struct inode *inode,
while (iblock <= last) {
if (inode)
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks,
true, cnt);
true);
else
ret = scoutfs_hold_trans(sb, cnt);
ret = scoutfs_hold_trans(sb);
if (ret)
break;
@@ -758,8 +756,7 @@ static int scoutfs_write_begin(struct file *file,
scoutfs_inode_index_prepare(sb, &wbd->ind_locks, inode,
true) ?:
scoutfs_inode_index_try_lock_hold(sb, &wbd->ind_locks,
ind_seq,
SIC_WRITE_BEGIN());
ind_seq);
} while (ret > 0);
if (ret < 0)
goto out;
@@ -1007,8 +1004,7 @@ long scoutfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
while(iblock <= last) {
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_FALLOCATE_ONE());
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
if (ret)
goto out;
@@ -1078,8 +1074,7 @@ int scoutfs_data_init_offline_extent(struct inode *inode, u64 size,
}
/* we're updating meta_seq with offline block count */
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_SETATTR_MORE());
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
if (ret < 0)
goto out;
@@ -1224,8 +1219,7 @@ int scoutfs_data_move_blocks(struct inode *from, u64 from_off,
ret = scoutfs_inode_index_start(sb, &seq) ?:
scoutfs_inode_index_prepare(sb, &locks, from, true) ?:
scoutfs_inode_index_prepare(sb, &locks, to, true) ?:
scoutfs_inode_index_try_lock_hold(sb, &locks, seq,
SIC_EXACT(1, 1));
scoutfs_inode_index_try_lock_hold(sb, &locks, seq);
if (ret > 0)
continue;
if (ret < 0)


@@ -655,7 +655,6 @@ static int del_entry_items(struct super_block *sb, u64 dir_ino, u64 hash,
*/
static struct inode *lock_hold_create(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev,
const struct scoutfs_item_count cnt,
struct scoutfs_lock **dir_lock,
struct scoutfs_lock **inode_lock,
struct list_head *ind_locks)
@@ -694,7 +693,7 @@ retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, ind_locks, dir, true) ?:
scoutfs_inode_index_prepare_ino(sb, ind_locks, ino, mode) ?:
scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq, cnt);
scoutfs_inode_index_try_lock_hold(sb, ind_locks, ind_seq);
if (ret > 0)
goto retry;
if (ret)
@@ -741,7 +740,6 @@ static int scoutfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
hash = dirent_name_hash(dentry->d_name.name, dentry->d_name.len);
inode = lock_hold_create(dir, dentry, mode, rdev,
SIC_MKNOD(dentry->d_name.len),
&dir_lock, &inode_lock, &ind_locks);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -836,8 +834,7 @@ retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_LINK(dentry->d_name.len));
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
if (ret > 0)
goto retry;
if (ret)
@@ -918,8 +915,7 @@ retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, dir, false) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_UNLINK(dentry->d_name.len));
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
if (ret > 0)
goto retry;
if (ret)
@@ -1154,7 +1150,6 @@ static int scoutfs_symlink(struct inode *dir, struct dentry *dentry,
return ret;
inode = lock_hold_create(dir, dentry, S_IFLNK|S_IRWXUGO, 0,
SIC_SYMLINK(dentry->d_name.len, name_len),
&dir_lock, &inode_lock, &ind_locks);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -1586,9 +1581,7 @@ retry:
scoutfs_inode_index_prepare(sb, &ind_locks, new_dir, false)) ?:
(new_inode == NULL ? 0 :
scoutfs_inode_index_prepare(sb, &ind_locks, new_inode, false)) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_RENAME(old_dentry->d_name.len,
new_dentry->d_name.len));
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
if (ret > 0)
goto retry;
if (ret)


@@ -343,8 +343,7 @@ static int set_inode_size(struct inode *inode, struct scoutfs_lock *lock,
if (!S_ISREG(inode->i_mode))
return 0;
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true,
SIC_DIRTY_INODE());
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, true);
if (ret)
return ret;
@@ -371,8 +370,7 @@ static int clear_truncate_flag(struct inode *inode, struct scoutfs_lock *lock)
LIST_HEAD(ind_locks);
int ret;
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_DIRTY_INODE());
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
if (ret)
return ret;
@@ -487,8 +485,7 @@ retry:
}
}
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false,
SIC_DIRTY_INODE());
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, false);
if (ret)
goto out;
@@ -1189,8 +1186,7 @@ int scoutfs_inode_index_start(struct super_block *sb, u64 *seq)
* Returns > 0 if the seq changed and the locks should be retried.
*/
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
struct list_head *list, u64 seq,
const struct scoutfs_item_count cnt)
struct list_head *list, u64 seq)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct index_lock *ind_lock;
@@ -1206,7 +1202,7 @@ int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
goto out;
}
ret = scoutfs_hold_trans(sb, cnt);
ret = scoutfs_hold_trans(sb);
if (ret == 0 && seq != sbi->trans_seq) {
scoutfs_release_trans(sb);
ret = 1;
@@ -1220,8 +1216,7 @@ out:
}
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
bool set_data_seq,
const struct scoutfs_item_count cnt)
bool set_data_seq)
{
struct super_block *sb = inode->i_sb;
int ret;
@@ -1231,7 +1226,7 @@ int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
ret = scoutfs_inode_index_start(sb, &seq) ?:
scoutfs_inode_index_prepare(sb, list, inode,
set_data_seq) ?:
scoutfs_inode_index_try_lock_hold(sb, list, seq, cnt);
scoutfs_inode_index_try_lock_hold(sb, list, seq);
} while (ret > 0);
return ret;
@@ -1499,8 +1494,7 @@ static int delete_inode_items(struct super_block *sb, u64 ino)
retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
prepare_index_deletion(sb, &ind_locks, ino, mode, &sinode) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_DROP_INODE(mode, size));
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
if (ret > 0)
goto retry;
if (ret)


@@ -4,7 +4,6 @@
#include "key.h"
#include "lock.h"
#include "per_task.h"
#include "count.h"
#include "format.h"
#include "data.h"
@@ -83,11 +82,9 @@ int scoutfs_inode_index_prepare_ino(struct super_block *sb,
struct list_head *list, u64 ino,
umode_t mode);
int scoutfs_inode_index_try_lock_hold(struct super_block *sb,
struct list_head *list, u64 seq,
const struct scoutfs_item_count cnt);
struct list_head *list, u64 seq);
int scoutfs_inode_index_lock_hold(struct inode *inode, struct list_head *list,
bool set_data_seq,
const struct scoutfs_item_count cnt);
bool set_data_seq);
void scoutfs_inode_index_unlock(struct super_block *sb, struct list_head *list);
int scoutfs_dirty_inode_item(struct inode *inode, struct scoutfs_lock *lock);


@@ -674,8 +674,7 @@ static long scoutfs_ioc_setattr_more(struct file *file, unsigned long arg)
/* setting only so we don't see 0 data seq with nonzero data_version */
set_data_seq = sm.data_version != 0 ? true : false;
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq,
SIC_SETATTR_MORE());
ret = scoutfs_inode_index_lock_hold(inode, &ind_locks, set_data_seq);
if (ret)
goto unlock;


@@ -31,7 +31,6 @@
#include "lock.h"
#include "super.h"
#include "ioctl.h"
#include "count.h"
#include "export.h"
#include "dir.h"
#include "server.h"
@@ -426,133 +425,59 @@ TRACE_EVENT(scoutfs_trans_write_func,
TRACE_EVENT(scoutfs_release_trans,
TP_PROTO(struct super_block *sb, void *rsv, unsigned int rsv_holders,
struct scoutfs_item_count *res,
struct scoutfs_item_count *act, unsigned int tri_holders,
unsigned int tri_writing, unsigned int tri_items,
unsigned int tri_vals),
unsigned int tri_holders,
unsigned int tri_writing),
TP_ARGS(sb, rsv, rsv_holders, res, act, tri_holders, tri_writing,
tri_items, tri_vals),
TP_ARGS(sb, rsv, rsv_holders, tri_holders, tri_writing),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(void *, rsv)
__field(unsigned int, rsv_holders)
__field(int, res_items)
__field(int, res_vals)
__field(int, act_items)
__field(int, act_vals)
__field(unsigned int, tri_holders)
__field(unsigned int, tri_writing)
__field(unsigned int, tri_items)
__field(unsigned int, tri_vals)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->rsv = rsv;
__entry->rsv_holders = rsv_holders;
__entry->res_items = res->items;
__entry->res_vals = res->vals;
__entry->act_items = act->items;
__entry->act_vals = act->vals;
__entry->tri_holders = tri_holders;
__entry->tri_writing = tri_writing;
__entry->tri_items = tri_items;
__entry->tri_vals = tri_vals;
),
TP_printk(SCSBF" rsv %p holders %u reserved %u.%u actual "
"%d.%d, trans holders %u writing %u reserved "
"%u.%u", SCSB_TRACE_ARGS, __entry->rsv, __entry->rsv_holders,
__entry->res_items, __entry->res_vals, __entry->act_items,
__entry->act_vals, __entry->tri_holders, __entry->tri_writing,
__entry->tri_items, __entry->tri_vals)
TP_printk(SCSBF" rsv %p holders %u trans holders %u writing %u",
SCSB_TRACE_ARGS, __entry->rsv, __entry->rsv_holders,
__entry->tri_holders, __entry->tri_writing)
);
TRACE_EVENT(scoutfs_trans_acquired_hold,
TP_PROTO(struct super_block *sb, const struct scoutfs_item_count *cnt,
TP_PROTO(struct super_block *sb,
void *rsv, unsigned int rsv_holders,
struct scoutfs_item_count *res,
struct scoutfs_item_count *act, unsigned int tri_holders,
unsigned int tri_writing, unsigned int tri_items,
unsigned int tri_vals),
unsigned int tri_holders,
unsigned int tri_writing),
TP_ARGS(sb, cnt, rsv, rsv_holders, res, act, tri_holders, tri_writing,
tri_items, tri_vals),
TP_ARGS(sb, rsv, rsv_holders, tri_holders, tri_writing),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(int, cnt_items)
__field(int, cnt_vals)
__field(void *, rsv)
__field(unsigned int, rsv_holders)
__field(int, res_items)
__field(int, res_vals)
__field(int, act_items)
__field(int, act_vals)
__field(unsigned int, tri_holders)
__field(unsigned int, tri_writing)
__field(unsigned int, tri_items)
__field(unsigned int, tri_vals)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->cnt_items = cnt->items;
__entry->cnt_vals = cnt->vals;
__entry->rsv = rsv;
__entry->rsv_holders = rsv_holders;
__entry->res_items = res->items;
__entry->res_vals = res->vals;
__entry->act_items = act->items;
__entry->act_vals = act->vals;
__entry->tri_holders = tri_holders;
__entry->tri_writing = tri_writing;
__entry->tri_items = tri_items;
__entry->tri_vals = tri_vals;
),
TP_printk(SCSBF" cnt %u.%u, rsv %p holders %u reserved %u.%u "
"actual %d.%d, trans holders %u writing %u reserved "
"%u.%u", SCSB_TRACE_ARGS, __entry->cnt_items,
__entry->cnt_vals, __entry->rsv, __entry->rsv_holders,
__entry->res_items, __entry->res_vals, __entry->act_items,
__entry->act_vals, __entry->tri_holders, __entry->tri_writing,
__entry->tri_items, __entry->tri_vals)
);
TRACE_EVENT(scoutfs_trans_track_item,
TP_PROTO(struct super_block *sb, int delta_items, int delta_vals,
int act_items, int act_vals, int res_items, int res_vals),
TP_ARGS(sb, delta_items, delta_vals, act_items, act_vals, res_items,
res_vals),
TP_STRUCT__entry(
SCSB_TRACE_FIELDS
__field(int, delta_items)
__field(int, delta_vals)
__field(int, act_items)
__field(int, act_vals)
__field(int, res_items)
__field(int, res_vals)
),
TP_fast_assign(
SCSB_TRACE_ASSIGN(sb);
__entry->delta_items = delta_items;
__entry->delta_vals = delta_vals;
__entry->act_items = act_items;
__entry->act_vals = act_vals;
__entry->res_items = res_items;
__entry->res_vals = res_vals;
),
TP_printk(SCSBF" delta_items %d delta_vals %d act_items %d act_vals %d res_items %d res_vals %d",
SCSB_TRACE_ARGS, __entry->delta_items, __entry->delta_vals,
__entry->act_items, __entry->act_vals, __entry->res_items,
__entry->res_vals)
TP_printk(SCSBF" rsv %p holders %u trans holders %u writing %u",
SCSB_TRACE_ARGS, __entry->rsv, __entry->rsv_holders,
__entry->tri_holders, __entry->tri_writing)
);
TRACE_EVENT(scoutfs_ioc_release,


@@ -60,8 +60,6 @@
*/
struct trans_info {
spinlock_t lock;
unsigned reserved_items;
unsigned reserved_vals;
unsigned holders;
bool writing;
@@ -318,12 +316,11 @@ void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
* Including nested holds avoids having to deal with writing out partial
* transactions while a caller still holds the transaction.
*/
#define SCOUTFS_RESERVATION_MAGIC 0xd57cd13b
struct scoutfs_reservation {
unsigned magic;
unsigned holders;
struct scoutfs_item_count reserved;
struct scoutfs_item_count actual;
};
/*
@@ -340,22 +337,16 @@ struct scoutfs_reservation {
* delaying or prematurely forcing commits.
*/
static bool acquired_hold(struct super_block *sb,
struct scoutfs_reservation *rsv,
const struct scoutfs_item_count *cnt)
struct scoutfs_reservation *rsv)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
DECLARE_TRANS_INFO(sb, tri);
bool acquired = false;
unsigned items;
unsigned vals;
spin_lock(&tri->lock);
trace_scoutfs_trans_acquired_hold(sb, cnt, rsv, rsv->holders,
&rsv->reserved, &rsv->actual,
tri->holders, tri->writing,
tri->reserved_items,
tri->reserved_vals);
trace_scoutfs_trans_acquired_hold(sb, rsv, rsv->holders,
tri->holders, tri->writing);
/* use a caller's existing reservation */
if (rsv->holders)
@@ -365,10 +356,6 @@ static bool acquired_hold(struct super_block *sb,
if (tri->writing)
goto out;
/* see if we can reserve space for our item count */
items = tri->reserved_items + cnt->items;
vals = tri->reserved_vals + cnt->vals;
/*
* In theory each dirty item page could be straddling two full
* blocks, requiring 4 allocations for each item cache page.
@@ -405,12 +392,6 @@ static bool acquired_hold(struct super_block *sb,
goto out;
}
tri->reserved_items = items;
tri->reserved_vals = vals;
rsv->reserved.items = cnt->items;
rsv->reserved.vals = cnt->vals;
hold:
rsv->holders++;
tri->holders++;
@@ -423,20 +404,12 @@ out:
return acquired;
}
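/*
 * A minimal sketch of the nesting contract that the holders counts
 * above implement, using the simplified post-commit API:
 *
 *	ret = scoutfs_hold_trans(sb);	outer hold allocates the rsv
 *	...
 *	ret = scoutfs_hold_trans(sb);	nested hold reuses it, holders++
 *	...
 *	scoutfs_release_trans(sb);	drops the nested hold
 *	scoutfs_release_trans(sb);	last release frees rsv, wakes waiters
 */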
int scoutfs_hold_trans(struct super_block *sb,
const struct scoutfs_item_count cnt)
int scoutfs_hold_trans(struct super_block *sb)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_reservation *rsv;
int ret;
/*
* Caller shouldn't provide garbage counts, nor counts that
* can't fit in segments by themselves.
*/
if (WARN_ON_ONCE(cnt.items <= 0 || cnt.vals < 0))
return -EINVAL;
if (current == sbi->trans_task)
return 0;
@@ -453,7 +426,7 @@ int scoutfs_hold_trans(struct super_block *sb,
BUG_ON(rsv->magic != SCOUTFS_RESERVATION_MAGIC);
ret = wait_event_interruptible(sbi->trans_hold_wq,
acquired_hold(sb, rsv, &cnt));
acquired_hold(sb, rsv));
if (ret && rsv->holders == 0) {
current->journal_info = NULL;
kfree(rsv);
@@ -473,38 +446,6 @@ bool scoutfs_trans_held(void)
return rsv && rsv->magic == SCOUTFS_RESERVATION_MAGIC;
}
/*
* Record a transaction holder's individual contribution to the dirty
* items in the current transaction. We're making sure that the
* reservation matches the possible item manipulations while they hold
* the reservation.
*
* It is possible and legitimate for an individual contribution to be
* negative if they delete dirty items. The item cache makes sure that
* the total dirty item count doesn't fall below zero.
*/
void scoutfs_trans_track_item(struct super_block *sb, signed items,
signed vals)
{
struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
struct scoutfs_reservation *rsv = current->journal_info;
if (current == sbi->trans_task)
return;
BUG_ON(!rsv || rsv->magic != SCOUTFS_RESERVATION_MAGIC);
rsv->actual.items += items;
rsv->actual.vals += vals;
trace_scoutfs_trans_track_item(sb, items, vals, rsv->actual.items,
rsv->actual.vals, rsv->reserved.items,
rsv->reserved.vals);
WARN_ON_ONCE(rsv->actual.items > rsv->reserved.items);
WARN_ON_ONCE(rsv->actual.vals > rsv->reserved.vals);
}
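/*
 * For context on the removal: callers reported their dirty item deltas
 * like the following (an illustrative sketch; the exact call sites are
 * not shown in this diff):
 *
 *	scoutfs_trans_track_item(sb, 1, val_len);	dirtied one item
 *	scoutfs_trans_track_item(sb, -1, -val_len);	deleted a dirty item
 */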
/*
* As we drop the last hold in the reservation we try and wake other
* hold attempts that were waiting for space. As we drop the last trans
@@ -526,16 +467,12 @@ void scoutfs_release_trans(struct super_block *sb)
spin_lock(&tri->lock);
trace_scoutfs_release_trans(sb, rsv, rsv->holders, &rsv->reserved,
&rsv->actual, tri->holders, tri->writing,
tri->reserved_items, tri->reserved_vals);
trace_scoutfs_release_trans(sb, rsv, rsv->holders, tri->holders, tri->writing);
BUG_ON(rsv->holders <= 0);
BUG_ON(tri->holders <= 0);
if (--rsv->holders == 0) {
tri->reserved_items -= rsv->reserved.items;
tri->reserved_vals -= rsv->reserved.vals;
current->journal_info = NULL;
kfree(rsv);
wake = true;


@@ -6,21 +6,16 @@
/* the client will force commits if data allocators get too low */
#define SCOUTFS_TRANS_DATA_ALLOC_LWM (256ULL * 1024 * 1024)
#include "count.h"
void scoutfs_trans_write_func(struct work_struct *work);
int scoutfs_trans_sync(struct super_block *sb, int wait);
int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync);
void scoutfs_trans_restart_sync_deadline(struct super_block *sb);
int scoutfs_hold_trans(struct super_block *sb,
const struct scoutfs_item_count cnt);
int scoutfs_hold_trans(struct super_block *sb);
bool scoutfs_trans_held(void);
void scoutfs_release_trans(struct super_block *sb);
u64 scoutfs_trans_sample_seq(struct super_block *sb);
void scoutfs_trans_track_item(struct super_block *sb, signed items,
signed vals);
int scoutfs_trans_get_log_trees(struct super_block *sb);
bool scoutfs_trans_has_dirty(struct super_block *sb);


@@ -577,10 +577,7 @@ static int scoutfs_xattr_set(struct dentry *dentry, const char *name,
retry:
ret = scoutfs_inode_index_start(sb, &ind_seq) ?:
scoutfs_inode_index_prepare(sb, &ind_locks, inode, false) ?:
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq,
SIC_XATTR_SET(found_parts,
value != NULL,
name_len, size));
scoutfs_inode_index_try_lock_hold(sb, &ind_locks, ind_seq);
if (ret > 0)
goto retry;
if (ret)
@@ -781,7 +778,7 @@ int scoutfs_xattr_drop(struct super_block *sb, u64 ino,
&tgs) != 0)
memset(&tgs, 0, sizeof(tgs));
ret = scoutfs_hold_trans(sb, SIC_EXACT(2, 0));
ret = scoutfs_hold_trans(sb);
if (ret < 0)
break;
release = true;