Commit 1aec5e4a authored by Jens Axboe

sbitmap: add helper to clear a batch of tags



sbitmap currently only supports clearing tags one-by-one; add a helper
that allows the caller to pass in an array of tags to clear.
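
A hedged sketch of the intended call (the caller-side names below are
illustrative, not part of this patch; pass 0 as the offset when the tag
values carry no bias):

	int tags[16];	/* tags previously allocated from sbq */
	int nr_tags;	/* number of valid entries in tags[] */

	/* free the whole batch with one call instead of nr_tags calls */
	sbitmap_queue_clear_batch(sbq, 0, tags, nr_tags);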

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5a72e899
include/linux/sbitmap.h +11 −0
@@ -528,6 +528,17 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset subtracted from each tag in @tags before clearing
 * @tags: array of tags to free
 * @nr_tags: number of tags in @tags
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
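
Ignoring the internal batching (and the fact that the batch path clears
the words directly rather than via the deferred-clear mask), the new
helper behaves like this editorial per-tag sketch:

	int i;

	for (i = 0; i < nr_tags; i++)
		sbitmap_queue_clear(sbq, tags[i] - offset, raw_smp_processor_id());
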
lib/sbitmap.c +41 −3
@@ -628,6 +628,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
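	/* cache the freed tag so the next allocation on @cpu can reuse it */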
	if (likely(!sb->round_robin && tag < sb->depth))
		*per_cpu_ptr(sb->alloc_hint, cpu) = tag;
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	/*
	 * Ensure stores tied to the freed tags (e.g. request state) are
	 * ordered before the bits are cleared and become visible for reuse.
	 */
	smp_mb__before_atomic();
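	/*
	 * Batch tags that fall into the same bitmap word, so that each
	 * word is cleared with a single atomic_long_andnot().
	 */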
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	/* order the clears before the waiter checks in sbitmap_queue_wake_up() */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
@@ -652,9 +692,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

-	if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
-		*per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
+	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
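
To make the word-coalescing concrete, here is a small self-contained
userspace sketch (an editorial addition, not part of the commit; SHIFT,
WORD() and BITNR() mimic sb->shift, SB_NR_TO_INDEX() and SB_NR_TO_BIT()
under the assumption of 64 tags per word):

	#include <stdio.h>

	#define SHIFT		6			/* 64 tags per word */
	#define WORD(t)		((t) >> SHIFT)		/* mimics SB_NR_TO_INDEX() */
	#define BITNR(t)	((t) & ((1U << SHIFT) - 1U))	/* mimics SB_NR_TO_BIT() */

	int main(void)
	{
		int tags[] = { 3, 5, 60, 70 };	/* 3, 5, 60 share word 0 */
		int nr_tags = 4, i, atomics = 0, cur = -1;
		unsigned long mask = 0;

		for (i = 0; i < nr_tags; i++) {
			int word = WORD(tags[i]);

			if (cur >= 0 && word != cur) {
				/* next tag lands in a new word: flush the mask */
				printf("andnot word %d, mask %#lx\n", cur, mask);
				atomics++;
				mask = 0;
			}
			cur = word;
			mask |= 1UL << BITNR(tags[i]);
		}
		if (mask) {
			/* flush whatever accumulated for the last word */
			printf("andnot word %d, mask %#lx\n", cur, mask);
			atomics++;
		}
		printf("%d atomic ops for %d tags\n", atomics, nr_tags);
		return 0;
	}

Run on the example input, this prints two andnot operations for four
tags; repeated sbitmap_queue_clear() calls would have issued one atomic
per tag.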