Commit a14c5738 authored by Namjae Jeon's avatar Namjae Jeon Committed by Steve French
Browse files

ksmbd: use wait_event instead of schedule_timeout()



ksmbd threads consume excessive CPU time when a connection is disconnected.
When a connection is disconnected, the ksmbd thread waits for pending
requests to be processed using schedule_timeout(). schedule_timeout() is
used incorrectly here; it is more efficient to use wait_event()/wake_up()
than to repeatedly poll r_count on a timeout.

Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 17ea92a9
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -66,6 +66,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
@@ -165,7 +166,6 @@ int ksmbd_conn_write(struct ksmbd_work *work)
	struct kvec iov[3];
	int iov_idx = 0;

	ksmbd_conn_try_dequeue_request(work);
	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
@@ -347,8 +347,8 @@ int ksmbd_conn_handler_loop(void *p)

out:
	/* Wait till all references to the Server object are dropped */
	while (atomic_read(&conn->r_count) > 0)
		schedule_timeout(HZ);
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);


	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
+1 −0
Original line number Diff line number Diff line
@@ -65,6 +65,7 @@ struct ksmbd_conn {
	unsigned int			outstanding_credits;
	spinlock_t			credits_lock;
	wait_queue_head_t		req_running_q;
	wait_queue_head_t		r_count_q;
	/* Lock to protect requests list*/
	spinlock_t			request_lock;
	struct list_head		requests;
+22 −13
Original line number Diff line number Diff line
@@ -615,18 +615,13 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
	struct ksmbd_file *fp;

	fp = ksmbd_lookup_durable_fd(br_info->fid);
	if (!fp) {
		atomic_dec(&conn->r_count);
		ksmbd_free_work_struct(work);
		return;
	}
	if (!fp)
		goto out;

	if (allocate_oplock_break_buf(work)) {
		pr_err("smb2_allocate_rsp_buf failed! ");
		atomic_dec(&conn->r_count);
		ksmbd_fd_put(work, fp);
		ksmbd_free_work_struct(work);
		return;
		goto out;
	}

	rsp_hdr = smb2_get_msg(work->response_buf);
@@ -667,8 +662,16 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)

	ksmbd_fd_put(work, fp);
	ksmbd_conn_write(work);

out:
	ksmbd_free_work_struct(work);
	atomic_dec(&conn->r_count);
	/*
	 * Check the waitqueue when dropping a pending request on
	 * disconnection. waitqueue_active() is safe here because
	 * the wait condition uses an atomic operation.
	 */
	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
		wake_up(&conn->r_count_q);
}

/**
@@ -731,9 +734,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)

	if (allocate_oplock_break_buf(work)) {
		ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
		ksmbd_free_work_struct(work);
		atomic_dec(&conn->r_count);
		return;
		goto out;
	}

	rsp_hdr = smb2_get_msg(work->response_buf);
@@ -771,8 +772,16 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
	inc_rfc1001_len(work->response_buf, 44);

	ksmbd_conn_write(work);

out:
	ksmbd_free_work_struct(work);
	atomic_dec(&conn->r_count);
	/*
	 * Check the waitqueue when dropping a pending request on
	 * disconnection. waitqueue_active() is safe here because
	 * the wait condition uses an atomic operation.
	 */
	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
		wake_up(&conn->r_count_q);
}

/**
+7 −1
Original line number Diff line number Diff line
@@ -261,7 +261,13 @@ static void handle_ksmbd_work(struct work_struct *wk)

	ksmbd_conn_try_dequeue_request(work);
	ksmbd_free_work_struct(work);
	atomic_dec(&conn->r_count);
	/*
	 * Check the waitqueue when dropping a pending request on
	 * disconnection. waitqueue_active() is safe here because
	 * the wait condition uses an atomic operation.
	 */
	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
		wake_up(&conn->r_count_q);
}

/**