Commit 3dd9c8b5 authored by David Howells

rxrpc: Remove the _bh annotation from all the spinlocks

None of the spinlocks in rxrpc need a _bh annotation now as the RCU
callback routines no longer take spinlocks and the bulk of the packet
wrangling code is now run in the I/O thread, not softirq context.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
parent 5e6ef4f1
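
For readers unfamiliar with the distinction the patch relies on, here is a minimal sketch of what the _bh suffix buys and why it can be dropped once no softirq-context user of a lock remains. This is not code from this patch; example_lock and the two functions are hypothetical illustrations only.

/*
 * If a spinlock can also be taken from softirq (bottom-half) context,
 * a process-context holder must disable BHs around the critical
 * section; otherwise a softirq firing on the same CPU could try to
 * acquire the lock that CPU already holds and deadlock.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

/* Before: softirq code could also take example_lock. */
static void example_before(void)
{
	spin_lock_bh(&example_lock);	/* disable BHs to avoid self-deadlock */
	/* ... touch data shared with softirq handlers ... */
	spin_unlock_bh(&example_lock);
}

/* After: every user runs in thread (e.g. I/O thread) context only. */
static void example_after(void)
{
	spin_lock(&example_lock);	/* plain lock is sufficient and cheaper */
	/* ... touch data now only accessed from thread context ... */
	spin_unlock(&example_lock);
}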
+2 −2
@@ -359,9 +359,9 @@ void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
 
 	/* Make sure we're not going to call back into a kernel service */
 	if (call->notify_rx) {
-		spin_lock_bh(&call->notify_lock);
+		spin_lock(&call->notify_lock);
 		call->notify_rx = rxrpc_dummy_notify_rx;
-		spin_unlock_bh(&call->notify_lock);
+		spin_unlock(&call->notify_lock);
 	}
 
 	mutex_unlock(&call->user_mutex);
+4 −4
@@ -138,9 +138,9 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	spin_lock_bh(&rxnet->call_lock);
+	spin_lock(&rxnet->call_lock);
 	list_add_tail_rcu(&call->link, &rxnet->calls);
-	spin_unlock_bh(&rxnet->call_lock);
+	spin_unlock(&rxnet->call_lock);
 
 	b->call_backlog[call_head] = call;
 	smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
@@ -188,8 +188,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
 	/* Make sure that there aren't any incoming calls in progress before we
 	 * clear the preallocation buffers.
 	 */
-	spin_lock_bh(&rx->incoming_lock);
-	spin_unlock_bh(&rx->incoming_lock);
+	spin_lock(&rx->incoming_lock);
+	spin_unlock(&rx->incoming_lock);
 
 	head = b->peer_backlog_head;
 	tail = b->peer_backlog_tail;
+2 −2
@@ -101,9 +101,9 @@ void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason,
 		return;
 	}
 
-	spin_lock_bh(&local->ack_tx_lock);
+	spin_lock(&local->ack_tx_lock);
 	list_add_tail(&txb->tx_link, &local->ack_tx_queue);
-	spin_unlock_bh(&local->ack_tx_lock);
+	spin_unlock(&local->ack_tx_lock);
 	trace_rxrpc_send_ack(call, why, ack_reason, serial);
 
 	rxrpc_wake_up_io_thread(local);
+10 −10
@@ -354,9 +354,9 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	write_unlock(&rx->call_lock);
 
 	rxnet = call->rxnet;
-	spin_lock_bh(&rxnet->call_lock);
+	spin_lock(&rxnet->call_lock);
 	list_add_tail_rcu(&call->link, &rxnet->calls);
-	spin_unlock_bh(&rxnet->call_lock);
+	spin_unlock(&rxnet->call_lock);
 
 	/* From this point on, the call is protected by its own lock. */
 	release_sock(&rx->sk);
@@ -537,7 +537,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 	del_timer_sync(&call->timer);
 
 	/* Make sure we don't get any more notifications */
-	write_lock_bh(&rx->recvmsg_lock);
+	write_lock(&rx->recvmsg_lock);
 
 	if (!list_empty(&call->recvmsg_link)) {
 		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
@@ -550,7 +550,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 	call->recvmsg_link.next = NULL;
 	call->recvmsg_link.prev = NULL;
 
-	write_unlock_bh(&rx->recvmsg_lock);
+	write_unlock(&rx->recvmsg_lock);
 	if (put)
 		rxrpc_put_call(call, rxrpc_call_put_unnotify);
 
@@ -622,9 +622,9 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
 
 		if (!list_empty(&call->link)) {
-			spin_lock_bh(&rxnet->call_lock);
+			spin_lock(&rxnet->call_lock);
 			list_del_init(&call->link);
-			spin_unlock_bh(&rxnet->call_lock);
+			spin_unlock(&rxnet->call_lock);
 		}
 
 		rxrpc_cleanup_call(call);
@@ -706,7 +706,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 	_enter("");
 
 	if (!list_empty(&rxnet->calls)) {
-		spin_lock_bh(&rxnet->call_lock);
+		spin_lock(&rxnet->call_lock);
 
 		while (!list_empty(&rxnet->calls)) {
 			call = list_entry(rxnet->calls.next,
@@ -721,12 +721,12 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 			       rxrpc_call_states[call->state],
 			       call->flags, call->events);
 
-			spin_unlock_bh(&rxnet->call_lock);
+			spin_unlock(&rxnet->call_lock);
 			cond_resched();
-			spin_lock_bh(&rxnet->call_lock);
+			spin_lock(&rxnet->call_lock);
 		}
 
-		spin_unlock_bh(&rxnet->call_lock);
+		spin_unlock(&rxnet->call_lock);
 	}
 
 	atomic_dec(&rxnet->nr_calls);
+2 −2
@@ -557,9 +557,9 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
 
 	trace_rxrpc_connect_call(call);
 
-	write_lock_bh(&call->state_lock);
+	write_lock(&call->state_lock);
 	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
-	write_unlock_bh(&call->state_lock);
+	write_unlock(&call->state_lock);
 
 	/* Paired with the read barrier in rxrpc_connect_call().  This orders
 	 * cid and epoch in the connection wrt to call_id without the need to