Commit 148e3731 authored by Uladzislau Rezki (Sony), committed by Paul E. McKenney

kvfree_rcu: Directly allocate page for single-argument case



Single-argument kvfree_rcu() must be invoked from sleepable contexts,
so we can directly allocate pages.  Furthermore, the fallback in case
of page-allocation failure is the high-latency synchronize_rcu(), so it
makes sense to do these page allocations from the fastpath, and even to
permit limited sleeping within the allocator.
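
For context, the two call forms differ as follows. This is an
illustrative sketch only: struct foo and the free_foo_*() helpers are
hypothetical and not part of this commit.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		struct rcu_head rh;	/* enables the double-argument form */
		int data;
	};

	/* Double-argument form: usable from atomic context, because the
	 * object's embedded rcu_head can always be queued even when no
	 * bulk page is available. */
	static void free_foo_double(struct foo *fp)
	{
		kvfree_rcu(fp, rh);
	}

	/* Single-argument form: the caller must be able to sleep, because
	 * a failed page allocation falls back to the high-latency
	 * synchronize_rcu(). */
	static void free_foo_single(struct foo *fp)
	{
		kvfree_rcu(fp);
	}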

This commit therefore allocates if needed on the fastpath using
GFP_KERNEL|__GFP_RETRY_MAYFAIL.  This also has the beneficial effect
of leaving kvfree_rcu()'s per-CPU caches to the double-argument variant
of kvfree_rcu(), given that the double-argument variant cannot directly
invoke the allocator.
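
Because the krcp lock is dropped across the sleepable allocation, the
task may migrate to another CPU while blocked, so the per-CPU pointer
must be re-read after relocking; this is why the reworked helper takes
struct kfree_rcu_cpu **krcp. Reduced to a sketch (the function name is
hypothetical; the real code is in the first hunk below):

	static struct kvfree_rcu_bulk_data *
	alloc_bnode_relock(struct kfree_rcu_cpu **krcp, unsigned long *flags)
	{
		struct kvfree_rcu_bulk_data *bnode;

		krc_this_cpu_unlock(*krcp, *flags);

		/* May sleep.  __GFP_RETRY_MAYFAIL retries harder than plain
		 * GFP_KERNEL but fails rather than invoking the OOM killer,
		 * and __GFP_NOWARN suppresses the failure warning. */
		bnode = (struct kvfree_rcu_bulk_data *)
			__get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

		/* Relock; this may now be a different CPU's krcp. */
		*krcp = krc_this_cpu_lock(flags);
		return bnode;
	}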

[ paulmck: Add add_ptr_to_bulk_krc_lock header comment per Michal Hocko. ]
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent a38fd874
Diffstat: +26 −16
@@ -3493,37 +3493,50 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp)
 	}
 }
 
+// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
+// state specified by flags.  If can_alloc is true, the caller must
+// be schedulable and not be holding any locks or mutexes that might be
+// acquired by the memory allocator or anything that it might invoke.
+// Returns true if ptr was successfully recorded, else the caller must
+// use a fallback.
 static inline bool
-kvfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp, void *ptr)
+add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
+	unsigned long *flags, void *ptr, bool can_alloc)
 {
 	struct kvfree_rcu_bulk_data *bnode;
 	int idx;
 
-	if (unlikely(!krcp->initialized))
+	*krcp = krc_this_cpu_lock(flags);
+	if (unlikely(!(*krcp)->initialized))
 		return false;
 
-	lockdep_assert_held(&krcp->lock);
 	idx = !!is_vmalloc_addr(ptr);
 
 	/* Check if a new block is required. */
-	if (!krcp->bkvhead[idx] ||
-			krcp->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
-		bnode = get_cached_bnode(krcp);
-		/* Switch to emergency path. */
+	if (!(*krcp)->bkvhead[idx] ||
+			(*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
+		bnode = get_cached_bnode(*krcp);
+		if (!bnode && can_alloc) {
+			krc_this_cpu_unlock(*krcp, *flags);
+			bnode = (struct kvfree_rcu_bulk_data *)
+				__get_free_page(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+			*krcp = krc_this_cpu_lock(flags);
+		}
+
 		if (!bnode)
 			return false;
 
 		/* Initialize the new block. */
 		bnode->nr_records = 0;
-		bnode->next = krcp->bkvhead[idx];
+		bnode->next = (*krcp)->bkvhead[idx];
 
 		/* Attach it to the head. */
-		krcp->bkvhead[idx] = bnode;
+		(*krcp)->bkvhead[idx] = bnode;
 	}
 
 	/* Finally insert. */
-	krcp->bkvhead[idx]->records
-		[krcp->bkvhead[idx]->nr_records++] = ptr;
+	(*krcp)->bkvhead[idx]->records
+		[(*krcp)->bkvhead[idx]->nr_records++] = ptr;
 
 	return true;
 }
@@ -3561,8 +3574,6 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 		ptr = (unsigned long *) func;
 	}
 
-	krcp = krc_this_cpu_lock(&flags);
-
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
 		// Probable double kfree_rcu(), just leak.
@@ -3570,12 +3581,11 @@ void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 			  __func__, head);
 
 		// Mark as success and leave.
-		success = true;
-		goto unlock_return;
+		return;
 	}
 
 	kasan_record_aux_stack(ptr);
-	success = kvfree_call_rcu_add_ptr_to_bulk(krcp, ptr);
+	success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
 	if (!success) {
 		run_page_cache_worker(krcp);