Commit 9263dddc authored by Takshak Chahande's avatar Takshak Chahande Committed by Alexei Starovoitov
Browse files

bpf: Extend batch operations for map-in-map bpf-maps



This patch extends batch operations support for map-in-map map-types:
BPF_MAP_TYPE_HASH_OF_MAPS and BPF_MAP_TYPE_ARRAY_OF_MAPS

A use case where an outer HASH map holds hundreds of VIP entries, with the
reuse-ports for each VIP stored in a REUSEPORT_SOCKARRAY-type inner map,
needs batch operations for performance gains.

This patch leverages the existing generic functions for most of the batch
operations. Because a map-in-map's value contains a reference to the inner map,
the BPF_MAP_TYPE_HASH_OF_MAPS type needs an extra step to fetch the
map_id from that reference value.

Selftests are added in the next patch (2/2).

Signed-off-by: default avatarTakshak Chahande <ctakshak@fb.com>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
Acked-by: default avatarYonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20220510082221.2390540-1-ctakshak@fb.com
parent 174efa78
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -1345,6 +1345,8 @@ const struct bpf_map_ops array_of_maps_map_ops = {
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_btf_id = &array_map_btf_ids[0],
};
+11 −2
Original line number Diff line number Diff line
@@ -140,7 +140,7 @@ static inline bool htab_use_raw_lock(const struct bpf_htab *htab)

static void htab_init_buckets(struct bpf_htab *htab)
{
	unsigned i;
	unsigned int i;

	for (i = 0; i < htab->n_buckets; i++) {
		INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
@@ -1627,7 +1627,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
	u32 batch, max_count, size, bucket_size;
	u32 batch, max_count, size, bucket_size, map_id;
	struct htab_elem *node_to_free = NULL;
	u64 elem_map_flags, map_flags;
	struct hlist_nulls_head *head;
@@ -1752,6 +1752,14 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
			}
		} else {
			value = l->key + roundup_key_size;
			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
				struct bpf_map **inner_map = value;

				 /* Actual value is the id of the inner map */
				map_id = map->ops->map_fd_sys_lookup_elem(*inner_map);
				value = &map_id;
			}

			if (elem_map_flags & BPF_F_LOCK)
				copy_map_value_locked(map, dst_val, value,
						      true);
@@ -2450,5 +2458,6 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = htab_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	BATCH_OPS(htab),
	.map_btf_id = &htab_map_btf_ids[0],
};