block/as-iosched.c (+2 −0)

--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -831,6 +831,8 @@ static void as_completed_request(struct request_queue *q, struct request *rq)
 	}
 
 	if (ad->changed_batch && ad->nr_dispatched == 1) {
+		ad->current_batch_expires = jiffies +
+					ad->batch_expire[ad->batch_data_dir];
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;
 
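What this hunk appears to do: when the anticipatory scheduler is switching batch direction and the last dispatched request completes, the expiry stamp for the new batch is now set before kblockd is kicked, so the batch window is measured from this completion instead of being left stale. A minimal sketch of how such an expiry stamp is typically consumed (batch_expired() is a hypothetical helper, not the kernel code; jiffies and time_after() are real):

#include <linux/jiffies.h>

/* Hypothetical helper mirroring the expiry-check pattern: a batch is
 * over once jiffies passes the stamp recorded at batch start. */
static int batch_expired(unsigned long current_batch_expires)
{
	/* time_after() handles jiffies wraparound correctly */
	return time_after(jiffies, current_batch_expires);
}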
fs/buffer.c (+8 −5)

--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -821,7 +821,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 				 * contents - it is a noop if I/O is still in
 				 * flight on potentially older contents.
 				 */
-				ll_rw_block(SWRITE, 1, &bh);
+				ll_rw_block(SWRITE_SYNC, 1, &bh);
 				brelse(bh);
 				spin_lock(lock);
 			}
@@ -2940,15 +2940,18 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	for (i = 0; i < nr; i++) {
 		struct buffer_head *bh = bhs[i];
 
-		if (rw == SWRITE)
+		if (rw == SWRITE || rw == SWRITE_SYNC)
 			lock_buffer(bh);
 		else if (test_set_buffer_locked(bh))
 			continue;
 
-		if (rw == WRITE || rw == SWRITE) {
+		if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
 			if (test_clear_buffer_dirty(bh)) {
 				bh->b_end_io = end_buffer_write_sync;
 				get_bh(bh);
-				submit_bh(WRITE, bh);
+				if (rw == SWRITE_SYNC)
+					submit_bh(WRITE_SYNC, bh);
+				else
+					submit_bh(WRITE, bh);
 				continue;
 			}
@@ -2978,7 +2981,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
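The common thread in these three hunks: fsync_buffers_list() and sync_dirty_buffer() issue a write and then immediately wait on it, so the submission should carry the sync hint instead of being queued as ordinary async write-out. A minimal sketch of that issue-then-wait pattern (write_buffer_and_wait() is a hypothetical caller, not kernel code; the buffer_head helpers are real):

#include <linux/buffer_head.h>

/* Hypothetical caller: submit one dirty buffer and block on it.
 * Because the caller waits right away, SWRITE_SYNC lets the I/O
 * scheduler treat the write as synchronous. */
static int write_buffer_and_wait(struct buffer_head *bh)
{
	int ret;

	get_bh(bh);				/* hold a ref across the I/O */
	ll_rw_block(SWRITE_SYNC, 1, &bh);	/* locks bh, submits WRITE_SYNC */
	wait_on_buffer(bh);			/* sleeps until end_buffer_write_sync runs */
	ret = buffer_uptodate(bh) ? 0 : -EIO;	/* a write error leaves bh !uptodate */
	put_bh(bh);
	return ret;
}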
include/linux/fs.h (+1 −0)

--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -83,6 +83,7 @@ extern int dir_notify_enable;
 #define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
 #define READ_META	(READ | (1 << BIO_RW_META))
 #define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
+#define SWRITE_SYNC	(SWRITE | (1 << BIO_RW_SYNC))
 #define WRITE_BARRIER	((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
 
 #define SEL_IN		1
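Since the new define just ORs the BIO_RW_SYNC bit onto SWRITE, callers can still branch on the base operation while lower layers pick the hint bit out of the rw value. A small illustration of the bit relationships implied by the defines above (rw_is_sync_hinted() is an assumption for illustration, not a kernel helper):

#include <linux/fs.h>

/* Consequences of the defines above:
 *   SWRITE_SYNC & ~(1 << BIO_RW_SYNC) == SWRITE, and the sync bit is
 *   the same one WRITE_SYNC and READ_SYNC carry, so it can be tested
 *   uniformly regardless of the base operation. */
static inline int rw_is_sync_hinted(int rw)
{
	return (rw & (1 << BIO_RW_SYNC)) != 0;	/* true for READ_SYNC, WRITE_SYNC, SWRITE_SYNC */
}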