diff --git a/kmod/src/super.c b/kmod/src/super.c
index 847b79b8..f89bad20 100644
--- a/kmod/src/super.c
+++ b/kmod/src/super.c
@@ -214,7 +214,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
 	atomic_set(&sbi->trans_holds, 0);
 	init_waitqueue_head(&sbi->trans_hold_wq);
 	spin_lock_init(&sbi->trans_write_lock);
-	INIT_WORK(&sbi->trans_write_work, scoutfs_trans_write_func);
+	INIT_DELAYED_WORK(&sbi->trans_write_work, scoutfs_trans_write_func);
 	init_waitqueue_head(&sbi->trans_write_wq);
 
 	/* XXX can have multiple mounts of a device, need mount id */
@@ -242,6 +242,7 @@ static int scoutfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sb->s_root)
 		return -ENOMEM;
 
+	scoutfs_trans_restart_sync_deadline(sb);
 //	scoutfs_scan_orphans(sb);
 
 	return 0;
diff --git a/kmod/src/super.h b/kmod/src/super.h
index 5b6d5903..8bbd313b 100644
--- a/kmod/src/super.h
+++ b/kmod/src/super.h
@@ -40,7 +40,7 @@ struct scoutfs_sb_info {
 	spinlock_t trans_write_lock;
 	u64 trans_write_count;
 	int trans_write_ret;
-	struct work_struct trans_write_work;
+	struct delayed_work trans_write_work;
 	wait_queue_head_t trans_write_wq;
 	struct workqueue_struct *trans_write_workq;
 
diff --git a/kmod/src/trans.c b/kmod/src/trans.c
index 11941c7e..58c12465 100644
--- a/kmod/src/trans.c
+++ b/kmod/src/trans.c
@@ -50,6 +50,9 @@
  * very long time.
  */
 
+/* sync dirty data at least this often */
+#define TRANS_SYNC_DELAY (HZ * 10)
+
 /*
  * This work func is responsible for writing out all the dirty blocks
  * that make up the current dirty transaction.  It prevents writers from
@@ -77,7 +80,7 @@ void scoutfs_trans_write_func(struct work_struct *work)
 {
 	struct scoutfs_sb_info *sbi = container_of(work, struct scoutfs_sb_info,
-						   trans_write_work);
+						   trans_write_work.work);
 	struct super_block *sb = sbi->sb;
 	struct scoutfs_bio_completion comp;
 	struct scoutfs_segment *seg;
@@ -125,6 +128,8 @@ out:
 
 	wake_up(&sbi->trans_hold_wq);
 	sbi->trans_task = NULL;
+
+	scoutfs_trans_restart_sync_deadline(sb);
 }
 
 struct write_attempt {
@@ -148,9 +153,14 @@ static int write_attempted(struct scoutfs_sb_info *sbi,
 	return done;
 }
 
+
+/*
+ * We always have delayed sync work pending but the caller wants it
+ * to execute immediately.
+ */
 static void queue_trans_work(struct scoutfs_sb_info *sbi)
 {
-	queue_work(sbi->trans_write_workq, &sbi->trans_write_work);
+	mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work, 0);
 }
 
 /*
@@ -194,6 +204,14 @@ int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
 	return scoutfs_sync_fs(file->f_inode->i_sb, 1);
 }
 
+void scoutfs_trans_restart_sync_deadline(struct super_block *sb)
+{
+	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
+
+	mod_delayed_work(sbi->trans_write_workq, &sbi->trans_write_work,
+			 TRANS_SYNC_DELAY);
+}
+
 /*
  * The holder that creates the most dirty item data is adding a full
  * size xattr.  The largest xattr can have a 255 byte name and 64KB
@@ -322,7 +340,7 @@ void scoutfs_shutdown_trans(struct super_block *sb)
 	struct scoutfs_sb_info *sbi = SCOUTFS_SB(sb);
 
 	if (sbi->trans_write_workq) {
-		flush_work(&sbi->trans_write_work);
+		cancel_delayed_work_sync(&sbi->trans_write_work);
 		destroy_workqueue(sbi->trans_write_workq);
 	}
 }
diff --git a/kmod/src/trans.h b/kmod/src/trans.h
index f1ecbc51..396ad6be 100644
--- a/kmod/src/trans.h
+++ b/kmod/src/trans.h
@@ -5,6 +5,7 @@ void scoutfs_trans_write_func(struct work_struct *work);
 int scoutfs_sync_fs(struct super_block *sb, int wait);
 int scoutfs_file_fsync(struct file *file, loff_t start, loff_t end,
 		       int datasync);
+void scoutfs_trans_restart_sync_deadline(struct super_block *sb);
 
 int scoutfs_hold_trans(struct super_block *sb);
 void scoutfs_release_trans(struct super_block *sb);
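For readers less familiar with the workqueue API, the following standalone sketch (not part of the patch) shows the work_struct to delayed_work conversion the patch performs: INIT_DELAYED_WORK() sets up the timer-backed work item, mod_delayed_work() both re-arms the periodic deadline and, with a delay of 0, forces an immediate run, and teardown needs cancel_delayed_work_sync() rather than flush_work(). The example_* names and EXAMPLE_SYNC_DELAY are hypothetical; only the workqueue calls mirror what the patch uses.

/*
 * Minimal delayed_work sketch, assuming a structure that embeds the
 * work item the way scoutfs_sb_info embeds trans_write_work.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EXAMPLE_SYNC_DELAY	(HZ * 10)	/* periodic sync deadline */

struct example_info {
	struct workqueue_struct *wq;
	struct delayed_work sync_work;		/* was a bare work_struct */
};

static void example_sync_func(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, so step out through .work */
	struct example_info *inf = container_of(work, struct example_info,
						sync_work.work);

	/* ... write out dirty state here ... */

	/* re-arm the deadline once this pass finishes */
	mod_delayed_work(inf->wq, &inf->sync_work, EXAMPLE_SYNC_DELAY);
}

static int example_setup(struct example_info *inf)
{
	inf->wq = alloc_workqueue("example_sync", WQ_UNBOUND, 1);
	if (!inf->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&inf->sync_work, example_sync_func);
	/* arm the first deadline */
	mod_delayed_work(inf->wq, &inf->sync_work, EXAMPLE_SYNC_DELAY);
	return 0;
}

static void example_sync_now(struct example_info *inf)
{
	/* pull the already-pending work forward so it runs immediately */
	mod_delayed_work(inf->wq, &inf->sync_work, 0);
}

static void example_teardown(struct example_info *inf)
{
	/* flush_work() alone is not enough; cancel the pending timer too */
	cancel_delayed_work_sync(&inf->sync_work);
	destroy_workqueue(inf->wq);
}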