Unverified Commit d4bd6f22 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!1954 zs_malloc: return ERR_PTR on failure

Merge Pull Request from: @ci-robot 
 
PR sync from: Jinjiang Tu <tujinjiang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/F75QF6TCLSEE5B5P7TR44NDZIQ5SDL46/ 
1. zs_malloc() returns an error code instead of 0 when
   an error happens.
2. Check the handle parameter in zs_free().

Hui Zhu (1):
  zsmalloc: zs_malloc: return ERR_PTR on failure

Sergey Senozhatsky (1):
  mm/zsmalloc: do not attempt to free IS_ERR handle


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I7TWVA 
 
Link: https://gitee.com/openeuler/kernel/pulls/1954

 

Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents 8ba3ffe1 f1e6c548
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -1488,7 +1488,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
	int ret = 0;
	unsigned long alloced_pages;
	unsigned long handle = 0;
	unsigned long handle = -ENOMEM;
	unsigned int comp_len = 0;
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
@@ -1534,21 +1534,21 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
	 * if we have a 'non-null' handle here then we are coming
	 * from the slow path and handle has already been allocated.
	 */
	if (!handle)
	if (IS_ERR((void *)handle))
		handle = zs_malloc(zram->mem_pool, comp_len,
				__GFP_KSWAPD_RECLAIM |
				__GFP_NOWARN |
				__GFP_HIGHMEM |
				__GFP_MOVABLE);
	if (!handle) {
	if (IS_ERR((void *)handle)) {
		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
		atomic64_inc(&zram->stats.writestall);
		handle = zs_malloc(zram->mem_pool, comp_len,
				GFP_NOIO | __GFP_HIGHMEM |
				__GFP_MOVABLE);
		if (handle)
		if (!IS_ERR((void *)handle))
			goto compress_again;
		return -ENOMEM;
		return PTR_ERR((void *)handle);
	}

	alloced_pages = zs_get_total_pages(zram->mem_pool);
+9 −6
Original line number Diff line number Diff line
@@ -401,7 +401,10 @@ static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	*handle = zs_malloc(pool, size, gfp);
	return *handle ? 0 : -1;

	if (IS_ERR((void *)(*handle)))
		return PTR_ERR((void *)*handle);
	return 0;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
@@ -1428,7 +1431,7 @@ static unsigned long obj_malloc(struct size_class *class,
 * @gfp: gfp flags when allocating object
 *
 * On success, handle to the allocated object is returned,
 * otherwise 0.
 * otherwise an ERR_PTR().
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
@@ -1439,11 +1442,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
	struct zspage *zspage;

	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
		return 0;
		return (unsigned long)ERR_PTR(-EINVAL);

	handle = cache_alloc_handle(pool, gfp);
	if (!handle)
		return 0;
		return (unsigned long)ERR_PTR(-ENOMEM);

	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
@@ -1466,7 +1469,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		cache_free_handle(pool, handle);
		return 0;
		return (unsigned long)ERR_PTR(-ENOMEM);
	}

	spin_lock(&class->lock);
@@ -1523,7 +1526,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
	enum fullness_group fullness;
	bool isolated;

	if (unlikely(!handle))
	if (IS_ERR_OR_NULL((void *)handle))
		return;

	pin_tag(handle);