Unverified Commit d6879a8e authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!872 [sync] PR-863: Backport CVEs and bugfixes

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/863 
 
PR sync from:  Jialin Zhang <zhangjialin11@huawei.com>
 https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/thread/UAMVHA4ICOFJJXDMX2CXEV6TEZSY7Y7U/ 
Pull new CVEs:
CVE-2023-22998

cgroup bugfix from Gaosheng Cui
sched bugfix from Xia Fukun
block bugfixes from Zhong Jinghua and Yu Kuai
iomap and ext4 bugfixes from Baokun Li
md and eulerfs bugfixes from Yu Kuai

-- 
2.25.1
 
 
Link: https://gitee.com/openeuler/kernel/pulls/872

 

Reviewed-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents c4fb2bc6 9bd94292
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -186,6 +186,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,

void blk_account_io_start(struct request *req);
void blk_account_io_done(struct request *req, u64 now);
int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

/*
 * Plug flush limits
+40 −7
Original line number Diff line number Diff line
@@ -736,17 +736,45 @@ static void register_disk(struct device *parent, struct gendisk *disk,
	}
}

static void disk_scan_partitions(struct gendisk *disk)
int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
{
	struct block_device *bdev;
	struct block_device *claim;
	int ret = 0;

	if (!get_capacity(disk) || !disk_part_scan_enabled(disk))
		return;
	if (!disk_part_scan_enabled(disk))
		return -EINVAL;

	/*
	 * If the device is opened exclusively by current thread already, it's
	 * safe to scan partitions, otherwise, use bd_prepare_to_claim() to
	 * synchronize with other exclusive openers and other partition
	 * scanners.
	 */
	if (!(mode & FMODE_EXCL)) {
		claim = bdget_part(&disk->part0);
		if (!claim)
			return -ENOMEM;

		ret = bd_prepare_to_claim(claim, claim, disk_scan_partitions);
		if (ret) {
			bdput(claim);
			return ret;
		}
	}

	set_bit(GD_NEED_PART_SCAN, &disk->state);
	bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
	if (!IS_ERR(bdev))
		blkdev_put(bdev, FMODE_READ);
	bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL);
	if (IS_ERR(bdev))
		ret = PTR_ERR(bdev);
	else
		blkdev_put(bdev, mode & ~FMODE_EXCL);

	if (!(mode & FMODE_EXCL)) {
		bd_abort_claiming(claim, claim, disk_scan_partitions);
		bdput(claim);
	}
	return ret;
}

static void disk_init_partition(struct gendisk *disk)
@@ -755,7 +783,8 @@ static void disk_init_partition(struct gendisk *disk)
	struct disk_part_iter piter;
	struct hd_struct *part;

	disk_scan_partitions(disk);
	if (get_capacity(disk))
		disk_scan_partitions(disk, FMODE_READ);

	/* announce disk after possible partitions are created */
	dev_set_uevent_suppress(ddev, 0);
@@ -847,6 +876,10 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
	disk_add_events(disk);
	blk_integrity_add(disk);

	/* Make sure the first partition scan will proceed */
	if (get_capacity(disk) && disk_part_scan_enabled(disk))
		set_bit(GD_NEED_PART_SCAN, &disk->state);

	/*
	 * Set the flag at last, so that the block device can't be opened
	 * before its registration is done.
+14 −26
Original line number Diff line number Diff line
@@ -32,9 +32,16 @@ static int blkpg_do_ioctl(struct block_device *bdev,
	if (op == BLKPG_DEL_PARTITION)
		return bdev_del_partition(bdev, p.pno);

	if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
		return -EINVAL;

	start = p.start >> SECTOR_SHIFT;
	length = p.length >> SECTOR_SHIFT;

	/* length may be equal to 0 after right shift */
	if (!length || start + length > get_capacity(bdev->bd_disk))
		return -EINVAL;

	/* check for fit in a hd_struct */
	if (sizeof(sector_t) < sizeof(long long)) {
		long pstart = start, plength = length;
@@ -90,31 +97,6 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
}
#endif

static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
{
	struct block_device *tmp;

	if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (bdev->bd_part_count)
		return -EBUSY;

	/*
	 * Reopen the device to revalidate the driver state and force a
	 * partition rescan.
	 */
	mode &= ~FMODE_EXCL;
	set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);

	tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);
	blkdev_put(tmp, mode);
	return 0;
}

static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
		unsigned long arg, unsigned long flags)
{
@@ -562,7 +544,13 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
		return 0;
	case BLKRRPART:
		return blkdev_reread_part(bdev, mode);
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (bdev_is_partition(bdev))
			return -EINVAL;
		if (bdev->bd_part_count)
			return -EBUSY;
		return disk_scan_partitions(bdev->bd_disk, mode);
	case BLKTRACESTART:
	case BLKTRACESTOP:
	case BLKTRACETEARDOWN:
+4 −2
Original line number Diff line number Diff line
@@ -157,9 +157,11 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
	 * since virtio_gpu doesn't support dma-buf import from other devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (!shmem->pages) {
	if (IS_ERR(shmem->pages)) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
		ret = PTR_ERR(shmem->pages);
		shmem->pages = NULL;
		return ret;
	}

	if (use_dma_api) {
+72 −17
Original line number Diff line number Diff line
@@ -690,12 +690,14 @@ void mddev_init(struct mddev *mddev)
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	mutex_init(&mddev->sync_mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	atomic_set(&mddev->sync_seq, 0);
	spin_lock_init(&mddev->lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
@@ -4855,29 +4857,80 @@ action_show(struct mddev *mddev, char *page)
	return sprintf(page, "%s\n", type);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
static void stop_sync_thread(struct mddev *mddev)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;
	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return;

	if (mddev_lock(mddev))
		return;

	/*
	 * Check again in case MD_RECOVERY_RUNNING is cleared before lock is
	 * held.
	 */
	if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
		mddev_unlock(mddev);
		return;
	}

	if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
		if (cmd_match(page, "frozen"))
			set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		else
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    mddev_lock(mddev) == 0) {
	if (work_pending(&mddev->del_work))
		flush_workqueue(md_misc_wq);
			if (mddev->sync_thread) {

	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
				md_reap_sync_thread(mddev);
			}
	/*
	 * Thread might be blocked waiting for metadata update which will now
	 * never happen.
	 */
	if (mddev->sync_thread)
		wake_up_process(mddev->sync_thread->tsk);

	mddev_unlock(mddev);
}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))

static void idle_sync_thread(struct mddev *mddev)
{
	int sync_seq = atomic_read(&mddev->sync_seq);

	if (mutex_lock_interruptible(&mddev->sync_mutex))
		return;

	clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	stop_sync_thread(mddev);

	wait_event_interruptible(resync_wait,
			sync_seq != atomic_read(&mddev->sync_seq) ||
			!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));

	mutex_unlock(&mddev->sync_mutex);
}

static void frozen_sync_thread(struct mddev *mddev)
{
	if (mutex_lock_interruptible(&mddev->sync_mutex))
		return;

	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	stop_sync_thread(mddev);

	wait_event_interruptible(resync_wait, mddev->sync_thread == NULL &&
			!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));

	mutex_unlock(&mddev->sync_mutex);
}

static ssize_t
action_store(struct mddev *mddev, const char *page, size_t len)
{
	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;


	if (cmd_match(page, "idle"))
		idle_sync_thread(mddev);
	else if (cmd_match(page, "frozen"))
		frozen_sync_thread(mddev);
	else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -9437,6 +9490,8 @@ void md_reap_sync_thread(struct mddev *mddev)

	/* resync has finished, collect result */
	md_unregister_thread(&mddev->sync_thread);
	atomic_inc(&mddev->sync_seq);

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    mddev->degraded != mddev->raid_disks) {
@@ -9481,7 +9536,6 @@ void md_reap_sync_thread(struct mddev *mddev)
	if (mddev_is_clustered(mddev) && is_reshaped
				      && !test_bit(MD_CLOSING, &mddev->flags))
		md_cluster_ops->update_size(mddev, old_dev_sectors);
	wake_up(&resync_wait);
	/* flag recovery needed just to double check */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	sysfs_notify_dirent_safe(mddev->sysfs_completed);
@@ -9489,6 +9543,7 @@ void md_reap_sync_thread(struct mddev *mddev)
	md_new_event(mddev);
	if (mddev->event_work.func)
		queue_work(md_misc_wq, &mddev->event_work);
	wake_up(&resync_wait);
}
EXPORT_SYMBOL(md_reap_sync_thread);

Loading