Commit ada5caed authored by Minchan Kim, committed by Andrew Morton
Browse files

zsmalloc: remove zs_compact_control

__zs_compact always putback src_zspage into class list after
migrate_zspage.  Thus, we don't need to keep last position of src_zspage
any more.  Let's remove it.

Link: https://lkml.kernel.org/r/20230624053120.643409-4-senozhatsky@chromium.org


Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Acked-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Alexey Romanov <avromanov@sberdevices.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4ce36584
Loading
Loading
Loading
Loading
+9 −28
Original line number Diff line number Diff line
@@ -1590,25 +1590,14 @@ static unsigned long find_alloced_obj(struct size_class *class,
	return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG);
}

struct zs_compact_control {
	/* Source spage for migration which could be a subpage of zspage */
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	 /* Starting object index within @s_page which used for live object
	  * in the subpage. */
	int obj_idx;
};

static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
			   struct zs_compact_control *cc)
static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
			   struct zspage *dst_zspage)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
	int obj_idx = cc->obj_idx;
	int obj_idx = 0;
	struct page *s_page = get_first_page(src_zspage);
	struct size_class *class = pool->size_class[src_zspage->class];

	while (1) {
		handle = find_alloced_obj(class, s_page, &obj_idx);
@@ -1621,24 +1610,20 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class,
		}

		used_obj = handle_to_obj(handle);
		free_obj = obj_malloc(pool, get_zspage(d_page), handle);
		free_obj = obj_malloc(pool, dst_zspage, handle);
		zs_object_copy(class, free_obj, used_obj);
		obj_idx++;
		record_obj(handle, free_obj);
		obj_free(class->size, used_obj);

		/* Stop if there is no more space */
		if (zspage_full(class, get_zspage(d_page)))
		if (zspage_full(class, dst_zspage))
			break;

		/* Stop if there are no more objects to migrate */
		if (zspage_empty(get_zspage(s_page)))
		if (zspage_empty(src_zspage))
			break;
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
	cc->obj_idx = obj_idx;
}

static struct zspage *isolate_src_zspage(struct size_class *class)
@@ -2013,7 +1998,6 @@ static unsigned long zs_can_compact(struct size_class *class)
static unsigned long __zs_compact(struct zs_pool *pool,
				  struct size_class *class)
{
	struct zs_compact_control cc;
	struct zspage *src_zspage = NULL;
	struct zspage *dst_zspage = NULL;
	unsigned long pages_freed = 0;
@@ -2031,7 +2015,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
			if (!dst_zspage)
				break;
			migrate_write_lock(dst_zspage);
			cc.d_page = get_first_page(dst_zspage);
		}

		src_zspage = isolate_src_zspage(class);
@@ -2040,9 +2023,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,

		migrate_write_lock_nested(src_zspage);

		cc.obj_idx = 0;
		cc.s_page = get_first_page(src_zspage);
		migrate_zspage(pool, class, &cc);
		migrate_zspage(pool, src_zspage, dst_zspage);
		fg = putback_zspage(class, src_zspage);
		migrate_write_unlock(src_zspage);