diff --git a/kmod/src/format.h b/kmod/src/format.h index 5be9ed58..07e90ad2 100644 --- a/kmod/src/format.h +++ b/kmod/src/format.h @@ -507,7 +507,13 @@ enum { /* * structures used by dlm */ +#define SCOUTFS_LOCK_SCOPE_GLOBAL 1 +#define SCOUTFS_LOCK_SCOPE_FS_ITEMS 2 + +#define SCOUTFS_LOCK_TYPE_GLOBAL_RENAME 1 + struct scoutfs_lock_name { + __u8 scope; __u8 zone; __u8 type; __le64 first; diff --git a/kmod/src/lock.c b/kmod/src/lock.c index c5712bdd..b379d95b 100644 --- a/kmod/src/lock.c +++ b/kmod/src/lock.c @@ -27,9 +27,9 @@ #include "dlmglue.h" #include "inode.h" -#define LN_FMT "%u.%u.%llu.%llu" +#define LN_FMT "%u.%u.%u.%llu.%llu" #define LN_ARG(name) \ - (name)->zone, (name)->type, le64_to_cpu((name)->first), \ + (name)->scope, (name)->zone, (name)->type, le64_to_cpu((name)->first),\ le64_to_cpu((name)->second) typedef struct ocfs2_super dlmglue_ctxt; @@ -174,6 +174,13 @@ static struct ocfs2_lock_res_ops scoufs_ino_index_lops = { .flags = 0, }; +static struct ocfs2_lock_res_ops scoutfs_global_lops = { + .get_osb = get_ino_lock_osb, + /* XXX: .post_unlock for lru */ + /* XXX: .check_downconvert that queries the item cache for dirty items */ + .flags = 0, +}; + static struct scoutfs_lock *alloc_scoutfs_lock(struct super_block *sb, struct scoutfs_lock_name *lock_name, struct ocfs2_lock_res_ops *type, @@ -185,38 +192,43 @@ static struct scoutfs_lock *alloc_scoutfs_lock(struct super_block *sb, // struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb); struct scoutfs_lock *lock; + if (WARN_ON_ONCE(!!start != !!end)) + return NULL; + lock = kzalloc(sizeof(struct scoutfs_lock), GFP_NOFS); - if (lock) { + if (lock == NULL) + return NULL; + + if (start) { lock->start = scoutfs_key_dup(sb, start); lock->end = scoutfs_key_dup(sb, end); if (!lock->start || !lock->end) { free_scoutfs_lock(lock); - lock = NULL; - } else { - RB_CLEAR_NODE(&lock->node); - lock->sb = sb; - lock->lock_name = *lock_name; - lock->mode = DLM_LOCK_IV; - INIT_WORK(&lock->dc_work, scoutfs_downconvert_func); - 
INIT_LIST_HEAD(&lock->lru_entry); - ocfs2_lock_res_init_once(&lock->lockres); - BUG_ON(sizeof(struct scoutfs_lock_name) >= - OCFS2_LOCK_ID_MAX_LEN); - /* kzalloc above ensures that l_name is NULL terminated */ - memcpy(&lock->lockres.l_name[0], &lock->lock_name, - sizeof(struct scoutfs_lock_name)); - ocfs2_lock_res_init_common(&linfo->dlmglue, - &lock->lockres, type, lock); + return NULL; } } + RB_CLEAR_NODE(&lock->node); + lock->sb = sb; + lock->lock_name = *lock_name; + lock->mode = DLM_LOCK_IV; + INIT_WORK(&lock->dc_work, scoutfs_downconvert_func); + INIT_LIST_HEAD(&lock->lru_entry); + ocfs2_lock_res_init_once(&lock->lockres); + BUG_ON(sizeof(struct scoutfs_lock_name) >= OCFS2_LOCK_ID_MAX_LEN); + /* kzalloc above ensures that l_name is NULL terminated */ + memcpy(&lock->lockres.l_name[0], &lock->lock_name, + sizeof(struct scoutfs_lock_name)); + ocfs2_lock_res_init_common(&linfo->dlmglue, &lock->lockres, type, lock); + return lock; } static int cmp_lock_names(struct scoutfs_lock_name *a, struct scoutfs_lock_name *b) { - return (int)a->zone - (int)b->zone ?: + return (int)a->scope - (int)b->scope ?: + (int)a->zone - (int)b->zone ?: (int)a->type - (int)b->type ?: scoutfs_cmp_u64s(le64_to_cpu(a->first), le64_to_cpu(b->first)) ?: - scoutfs_cmp_u64s(le64_to_cpu(b->second), le64_to_cpu(b->second)); + scoutfs_cmp_u64s(le64_to_cpu(a->second), le64_to_cpu(b->second)); @@ -520,6 +532,7 @@ int scoutfs_lock_ino(struct super_block *sb, int mode, int flags, u64 ino, ino &= ~(u64)SCOUTFS_LOCK_INODE_GROUP_MASK; + lock_name.scope = SCOUTFS_LOCK_SCOPE_FS_ITEMS; lock_name.zone = SCOUTFS_FS_ZONE; lock_name.type = SCOUTFS_INODE_TYPE; lock_name.first = cpu_to_le64(ino); @@ -655,6 +668,22 @@ int scoutfs_lock_inodes(struct super_block *sb, int mode, int flags, return ret; } +/* + * Acquire a cluster lock with a global scope in the lock space. 
+ */ +int scoutfs_lock_global(struct super_block *sb, int mode, int flags, int type, + struct scoutfs_lock **lock) +{ + struct scoutfs_lock_name lock_name; + + memset(&lock_name, 0, sizeof(lock_name)); + lock_name.scope = SCOUTFS_LOCK_SCOPE_GLOBAL; + lock_name.type = type; + + return lock_name_keys(sb, mode, flags, &lock_name, &scoutfs_global_lops, + NULL, NULL, lock); +} + /* * map inode index items to locks. The idea is to not have to * constantly get locks over a reasonable distribution of items, but @@ -697,6 +726,7 @@ int scoutfs_lock_inode_index(struct super_block *sb, int mode, BUG(); } + lock_name.scope = SCOUTFS_LOCK_SCOPE_FS_ITEMS; lock_name.zone = SCOUTFS_INODE_INDEX_ZONE; lock_name.type = type; lock_name.first = cpu_to_le64(major & ~major_mask); diff --git a/kmod/src/lock.h b/kmod/src/lock.h index 5a2f926b..68ba7329 100644 --- a/kmod/src/lock.h +++ b/kmod/src/lock.h @@ -42,6 +42,8 @@ int scoutfs_lock_inodes(struct super_block *sb, int mode, int flags, struct inode *b, struct scoutfs_lock **b_lock, struct inode *c, struct scoutfs_lock **c_lock, struct inode *d, struct scoutfs_lock **D_lock); +int scoutfs_lock_global(struct super_block *sb, int mode, int flags, int type, + struct scoutfs_lock **lock); void scoutfs_unlock(struct super_block *sb, struct scoutfs_lock *lock, int level); diff --git a/kmod/src/scoutfs_trace.h b/kmod/src/scoutfs_trace.h index bd5b3855..a0c75dfc 100644 --- a/kmod/src/scoutfs_trace.h +++ b/kmod/src/scoutfs_trace.h @@ -299,6 +299,7 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class, TP_PROTO(struct super_block *sb, struct scoutfs_lock *lck), TP_ARGS(sb, lck), TP_STRUCT__entry( + __field(u8, name_scope) __field(u8, name_zone) __field(u8, name_type) __field(u64, name_first) @@ -311,6 +312,7 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class, __field(unsigned int, holders) ), TP_fast_assign( + __entry->name_scope = lck->lock_name.scope; __entry->name_zone = lck->lock_name.zone; __entry->name_type = lck->lock_name.type; __entry->name_first = 
le64_to_cpu(lck->lock_name.first); @@ -322,9 +324,9 @@ DECLARE_EVENT_CLASS(scoutfs_lock_class, __entry->refcnt = lck->refcnt; __entry->holders = lck->holders; ), - TP_printk("name %u.%u.%llu.%llu seq %u refs %d holders %d mode %s rqmode %s flags 0x%x", - __entry->name_zone, __entry->name_type, __entry->name_first, - __entry->name_second, __entry->seq, + TP_printk("name %u.%u.%u.%llu.%llu seq %u refs %d holders %d mode %s rqmode %s flags 0x%x", + __entry->name_scope, __entry->name_zone, __entry->name_type, + __entry->name_first, __entry->name_second, __entry->seq, __entry->refcnt, __entry->holders, lock_mode(__entry->mode), lock_mode(__entry->rqmode), __entry->flags) );