Unverified Commit d1ecc4c3 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3779 [sync] PR-3420: md: protect md_thread with rcu

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/3420 
 
PR sync from: Li Lingfeng <lilingfeng3@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/LIV6O4BF2E6AH6CZN7GIEXGTSZMTUWFO/ 
Protect md_thread with rcu

v1->v2:
  Add missing issue.

Yu Kuai (5):
  md: factor out a helper to wake up md_thread directly
  dm-raid: remove useless checking in raid_message()
  md/bitmap: always wake up md_thread in timeout_store
  md/bitmap: factor out a helper to set timeout
  md: protect md_thread with rcu


-- 
2.31.1
 
https://gitee.com/openeuler/kernel/issues/I8OPEK 
 
Link: https://gitee.com/openeuler/kernel/pulls/3779

 

Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents 7dccb832 0d012343
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -3717,11 +3717,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended && mddev->sync_thread)
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended && mddev->thread)
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
+26 −17
Original line number Diff line number Diff line
@@ -1219,11 +1219,28 @@ static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
					       sector_t offset, sector_t *blocks,
					       int create);

static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout,
			      bool force)
{
	struct md_thread *thread;

	rcu_read_lock();
	thread = rcu_dereference(mddev->thread);

	if (!thread)
		goto out;

	if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT)
		thread->timeout = timeout;

out:
	rcu_read_unlock();
}

/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *			out to disk
 */

void md_bitmap_daemon_work(struct mddev *mddev)
{
	struct bitmap *bitmap;
@@ -1247,7 +1264,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)

	bitmap->daemon_lastrun = jiffies;
	if (bitmap->allclean) {
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
		mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);
		goto done;
	}
	bitmap->allclean = 1;
@@ -1344,8 +1361,7 @@ void md_bitmap_daemon_work(struct mddev *mddev)

 done:
	if (bitmap->allclean == 0)
		mddev->thread->timeout =
			mddev->bitmap_info.daemon_sleep;
		mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
	mutex_unlock(&mddev->bitmap_info.mutex);
}

@@ -1801,8 +1817,7 @@ void md_bitmap_destroy(struct mddev *mddev)
	mddev->bitmap = NULL; /* disconnect from the md device */
	spin_unlock(&mddev->lock);
	mutex_unlock(&mddev->bitmap_info.mutex);
	if (mddev->thread)
		mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
	mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true);

	md_bitmap_free(bitmap);
}
@@ -1945,7 +1960,7 @@ int md_bitmap_load(struct mddev *mddev)
	/* Kick recovery in case any bits were set */
	set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

	mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
	mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true);
	md_wakeup_thread(mddev->thread);

	md_bitmap_update_sb(bitmap);
@@ -2450,17 +2465,11 @@ timeout_store(struct mddev *mddev, const char *buf, size_t len)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;

	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
	mddev_set_timeout(mddev, timeout, false);
	md_wakeup_thread(mddev->thread);
		}
	}

	return len;
}

+11 −6
Original line number Diff line number Diff line
@@ -75,14 +75,14 @@ struct md_cluster_info {
	sector_t suspend_hi;
	int suspend_from; /* the slot which broadcast suspend_lo/hi */

	struct md_thread *recovery_thread;
	struct md_thread __rcu *recovery_thread;
	unsigned long recovery_map;
	/* communication loc resources */
	struct dlm_lock_resource *ack_lockres;
	struct dlm_lock_resource *message_lockres;
	struct dlm_lock_resource *token_lockres;
	struct dlm_lock_resource *no_new_dev_lockres;
	struct md_thread *recv_thread;
	struct md_thread __rcu *recv_thread;
	struct completion newdisk_completion;
	wait_queue_head_t wait;
	unsigned long state;
@@ -362,8 +362,8 @@ static void __recover_slot(struct mddev *mddev, int slot)

	set_bit(slot, &cinfo->recovery_map);
	if (!cinfo->recovery_thread) {
		cinfo->recovery_thread = md_register_thread(recover_bitmaps,
				mddev, "recover");
		rcu_assign_pointer(cinfo->recovery_thread,
			md_register_thread(recover_bitmaps, mddev, "recover"));
		if (!cinfo->recovery_thread) {
			pr_warn("md-cluster: Could not create recovery thread\n");
			return;
@@ -526,11 +526,15 @@ static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
static void process_metadata_update(struct mddev *mddev, struct cluster_msg *msg)
{
	int got_lock = 0;
	struct md_thread *thread;
	struct md_cluster_info *cinfo = mddev->cluster_info;
	mddev->good_device_nr = le32_to_cpu(msg->raid_slot);

	dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
	wait_event(mddev->thread->wqueue,

	/* daemaon thread must exist */
	thread = rcu_dereference_protected(mddev->thread, true);
	wait_event(thread->wqueue,
		   (got_lock = mddev_trylock(mddev)) ||
		    test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state));
	md_reload_sb(mddev, mddev->good_device_nr);
@@ -890,7 +894,8 @@ static int join(struct mddev *mddev, int nodes)
	}
	/* Initiate the communication resources */
	ret = -ENOMEM;
	cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
	rcu_assign_pointer(cinfo->recv_thread,
			md_register_thread(recv_daemon, mddev, "cluster_recv"));
	if (!cinfo->recv_thread) {
		pr_err("md-cluster: cannot allocate memory for recv_thread!\n");
		goto err;
+2 −2
Original line number Diff line number Diff line
@@ -407,8 +407,8 @@ static int multipath_run (struct mddev *mddev)
	if (ret)
		goto out_free_conf;

	mddev->thread = md_register_thread(multipathd, mddev,
					   "multipath");
	rcu_assign_pointer(mddev->thread,
			   md_register_thread(multipathd, mddev, "multipath"));
	if (!mddev->thread)
		goto out_free_conf;

+47 −41
Original line number Diff line number Diff line
@@ -68,11 +68,7 @@
#include "md-bitmap.h"
#include "md-cluster.h"

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
/* pers_list is a list of registered personalities protected by pers_lock. */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

@@ -90,6 +86,7 @@ static struct workqueue_struct *md_rdev_misc_wq;
static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);

/*
 * Default number of read corrections we'll attempt on an rdev
@@ -499,8 +496,10 @@ static blk_qc_t md_submit_bio(struct bio *bio)
 */
void mddev_suspend(struct mddev *mddev)
{
	WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
	lockdep_assert_held(&mddev->reconfig_mutex);
	struct md_thread *thread = rcu_dereference_protected(mddev->thread,
			lockdep_is_held(&mddev->reconfig_mutex));

	WARN_ON_ONCE(thread && current == thread->tsk);
	if (mddev->suspended++)
		return;
	synchronize_rcu();
@@ -845,13 +844,8 @@ void mddev_unlock(struct mddev *mddev)
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);
	spin_unlock(&pers_lock);
}
EXPORT_SYMBOL_GPL(mddev_unlock);

@@ -6391,10 +6385,12 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
	}
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	if (mddev->sync_thread)
		/* Thread might be blocked waiting for metadata update
		 * which will now never happen */
		wake_up_process(mddev->sync_thread->tsk);

	/*
	 * Thread might be blocked waiting for metadata update which will now
	 * never happen
	 */
	md_wakeup_thread_directly(mddev->sync_thread);

	if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
		return -EBUSY;
@@ -6455,10 +6451,12 @@ static int do_md_stop(struct mddev *mddev, int mode,
	}
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	if (mddev->sync_thread)
		/* Thread might be blocked waiting for metadata update
		 * which will now never happen */
		wake_up_process(mddev->sync_thread->tsk);

	/*
	 * Thread might be blocked waiting for metadata update which will now
	 * never happen
	 */
	md_wakeup_thread_directly(mddev->sync_thread);

	mddev_unlock(mddev);
	wait_event(resync_wait, (mddev->sync_thread == NULL &&
@@ -8005,13 +8003,29 @@ static int md_thread(void *arg)
	return 0;
}

void md_wakeup_thread(struct md_thread *thread)
static void md_wakeup_thread_directly(struct md_thread __rcu *thread)
{
	if (thread) {
		pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm);
		set_bit(THREAD_WAKEUP, &thread->flags);
		wake_up(&thread->wqueue);
	struct md_thread *t;

	rcu_read_lock();
	t = rcu_dereference(thread);
	if (t)
		wake_up_process(t->tsk);
	rcu_read_unlock();
}

void md_wakeup_thread(struct md_thread __rcu *thread)
{
	struct md_thread *t;

	rcu_read_lock();
	t = rcu_dereference(thread);
	if (t) {
		pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
		set_bit(THREAD_WAKEUP, &t->flags);
		wake_up(&t->wqueue);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(md_wakeup_thread);

@@ -8041,22 +8055,15 @@ struct md_thread *md_register_thread(void (*run) (struct md_thread *),
}
EXPORT_SYMBOL(md_register_thread);

void md_unregister_thread(struct md_thread **threadp)
void md_unregister_thread(struct md_thread __rcu **threadp)
{
	struct md_thread *thread;
	struct md_thread *thread = rcu_dereference_protected(*threadp, true);

	/*
	 * Locking ensures that mddev_unlock does not wake_up a
	 * non-existent thread
	 */
	spin_lock(&pers_lock);
	thread = *threadp;
	if (!thread) {
		spin_unlock(&pers_lock);
	if (!thread)
		return;
	}
	*threadp = NULL;
	spin_unlock(&pers_lock);

	rcu_assign_pointer(*threadp, NULL);
	synchronize_rcu();

	pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
	kthread_stop(thread->tsk);
@@ -9257,9 +9264,8 @@ static void md_start_sync(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, del_work);

	mddev->sync_thread = md_register_thread(md_do_sync,
						mddev,
						"resync");
	rcu_assign_pointer(mddev->sync_thread,
			   md_register_thread(md_do_sync, mddev, "resync"));
	if (!mddev->sync_thread) {
		pr_warn("%s: could not start resync thread...\n",
			mdname(mddev));
Loading