fs/aio.c (+18 −18)

@@ -400,8 +400,8 @@ static const struct file_operations aio_ring_fops = {
 };
 
 #if IS_ENABLED(CONFIG_MIGRATION)
-static int aio_migratepage(struct address_space *mapping, struct page *new,
-			struct page *old, enum migrate_mode mode)
+static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
+			struct folio *src, enum migrate_mode mode)
 {
 	struct kioctx *ctx;
 	unsigned long flags;
@@ -435,10 +435,10 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 		goto out;
 	}
 
-	idx = old->index;
+	idx = src->index;
 	if (idx < (pgoff_t)ctx->nr_pages) {
-		/* Make sure the old page hasn't already been changed */
-		if (ctx->ring_pages[idx] != old)
+		/* Make sure the old folio hasn't already been changed */
+		if (ctx->ring_pages[idx] != &src->page)
 			rc = -EAGAIN;
 	} else
 		rc = -EINVAL;
@@ -447,27 +447,27 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 		goto out_unlock;
 
 	/* Writeback must be complete */
-	BUG_ON(PageWriteback(old));
-	get_page(new);
+	BUG_ON(folio_test_writeback(src));
+	folio_get(dst);
 
-	rc = migrate_page_move_mapping(mapping, new, old, 1);
+	rc = folio_migrate_mapping(mapping, dst, src, 1);
 	if (rc != MIGRATEPAGE_SUCCESS) {
-		put_page(new);
+		folio_put(dst);
 		goto out_unlock;
 	}
 
 	/* Take completion_lock to prevent other writes to the ring buffer
-	 * while the old page is copied to the new.  This prevents new
+	 * while the old folio is copied to the new.  This prevents new
 	 * events from being lost.
 	 */
 	spin_lock_irqsave(&ctx->completion_lock, flags);
-	migrate_page_copy(new, old);
-	BUG_ON(ctx->ring_pages[idx] != old);
-	ctx->ring_pages[idx] = new;
+	folio_migrate_copy(dst, src);
+	BUG_ON(ctx->ring_pages[idx] != &src->page);
+	ctx->ring_pages[idx] = &dst->page;
 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-	/* The old page is no longer accessible. */
-	put_page(old);
+	/* The old folio is no longer accessible. */
+	folio_put(src);
 
 out_unlock:
 	mutex_unlock(&ctx->ring_lock);
@@ -475,13 +475,13 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 	spin_unlock(&mapping->private_lock);
 	return rc;
 }
+#else
+#define aio_migrate_folio NULL
 #endif
 
 static const struct address_space_operations aio_ctx_aops = {
 	.dirty_folio	= noop_dirty_folio,
-#if IS_ENABLED(CONFIG_MIGRATION)
-	.migratepage	= aio_migratepage,
-#endif
+	.migrate_folio	= aio_migrate_folio,
 };
 
 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
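
One structural detail worth calling out: instead of keeping the #if IS_ENABLED(CONFIG_MIGRATION) guard inside the aio_ctx_aops initializer, the patch adds an #else branch that defines aio_migrate_folio as NULL, so the ops table can name the callback unconditionally. Below is a minimal sketch of the same shape for a hypothetical filesystem; the myfs_* names are invented for illustration, and it assumes the post-5.19 folio migration API, where a filesystem with no private pointers into its pages can usually delegate to the generic filemap_migrate_folio() helper rather than hand-rolling aio's lock-and-copy sequence:

#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>

#if IS_ENABLED(CONFIG_MIGRATION)
/*
 * No kioctx-style bookkeeping here: the generic helper moves the
 * mapping entry and copies contents and flags from src to dst.
 */
static int myfs_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	return filemap_migrate_folio(mapping, dst, src, mode);
}
#else
/* Migration compiled out: a NULL callback, no #ifdef in the ops table. */
#define myfs_migrate_folio NULL
#endif

static const struct address_space_operations myfs_aops = {
	.dirty_folio	= filemap_dirty_folio,
	.migrate_folio	= myfs_migrate_folio,
};

The contrast with the diff above is the point: aio cannot use the generic helper as-is, because ctx->ring_pages[] holds raw pointers into the ring, so it has to swap in &dst->page under completion_lock while the contents are copied, preventing concurrently completed events from being lost.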