[PATCH bpf-next v1 1/4] bpf: Support reporting BPF htab map's used size for monitoring

From: Ho-Ren (Jack) Chuang
Date: Fri Nov 04 2022 - 22:52:36 EST


Expose a BPF htab map's used size to userspace by counting elements as
they are allocated and freed.

Maintain the htab->count value for both preallocated and dynamically
allocated maps, and expose it to userspace via a new field
"used_entries" in struct bpf_map_info to allow monitoring. Only the
hash table type (BPF_MAP_TYPE_HASH) is supported.
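
For example, a monitoring tool could read the new field along the
following lines (a sketch using libbpf's bpf_obj_get_info_by_fd();
the map_fd argument is assumed to be an already-opened fd of a
BPF_MAP_TYPE_HASH map and is not part of this patch):

  #include <stdio.h>
  #include <bpf/bpf.h>

  /* Print how full a hash map currently is. */
  static int print_map_usage(int map_fd)
  {
  	struct bpf_map_info info = {};
  	__u32 info_len = sizeof(info);
  	int err;

  	err = bpf_obj_get_info_by_fd(map_fd, &info, &info_len);
  	if (err)
  		return err;

  	/* used_entries is the field added by this patch */
  	printf("%s: %u/%u entries in use\n",
  	       info.name, info.used_entries, info.max_entries);
  	return 0;
  }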

Signed-off-by: Ho-Ren (Jack) Chuang <horenchuang@xxxxxxxxxxxxx>
---
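As a quick way to exercise the accounting from userspace, a check along
these lines can be used (a sketch, not part of this patch; it assumes
libbpf and UAPI headers that already carry the new used_entries field,
and sufficient privileges to create maps):

  #include <assert.h>
  #include <bpf/bpf.h>

  int main(void)
  {
  	struct bpf_map_info info = {};
  	__u32 len = sizeof(info);
  	__u32 key, val = 0;
  	int fd;

  	/* preallocated hash map with room for 16 entries */
  	fd = bpf_map_create(BPF_MAP_TYPE_HASH, "used_test",
  			    sizeof(key), sizeof(val), 16, NULL);
  	assert(fd >= 0);

  	for (key = 0; key < 4; key++)
  		assert(!bpf_map_update_elem(fd, &key, &val, BPF_ANY));
  	key = 0;
  	assert(!bpf_map_delete_elem(fd, &key));

  	assert(!bpf_obj_get_info_by_fd(fd, &info, &len));
  	/* 4 inserted, 1 deleted -> 3 entries expected in use */
  	assert(info.used_entries == 3);
  	return 0;
  }
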
 include/linux/bpf.h      |  1 +
 include/uapi/linux/bpf.h |  1 +
 kernel/bpf/hashtab.c     | 19 +++++++++++++++++++
 kernel/bpf/syscall.c     |  2 ++
 4 files changed, 23 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9e7d46d16032..82ee14139b69 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -97,6 +97,7 @@ struct bpf_map_ops {
 	int (*map_pop_elem)(struct bpf_map *map, void *value);
 	int (*map_peek_elem)(struct bpf_map *map, void *value);
 	void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
+	u32 (*map_get_used_elem)(struct bpf_map *map);
 
 	/* funcs called by prog_array and perf_event_array map */
 	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 17f61338f8f8..63659368cf0e 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -6215,6 +6215,7 @@ struct bpf_map_info {
 	__u32 id;
 	__u32 key_size;
 	__u32 value_size;
+	__u32 used_entries;
 	__u32 max_entries;
 	__u32 map_flags;
 	char name[BPF_OBJ_NAME_LEN];
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index ed3f8a53603b..bc9c00b92e57 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -913,6 +913,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 	if (htab_is_prealloc(htab)) {
 		check_and_free_fields(htab, l);
 		__pcpu_freelist_push(&htab->freelist, &l->fnode);
+		dec_elem_count(htab);
 	} else {
 		dec_elem_count(htab);
 		htab_elem_free(htab, l);
@@ -994,6 +995,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			if (!l)
 				return ERR_PTR(-E2BIG);
 			l_new = container_of(l, struct htab_elem, fnode);
+			inc_elem_count(htab);
 		}
 	} else {
 		if (is_map_full(htab))
@@ -2186,6 +2188,22 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_f
 	return num_elems;
 }
 
+u32 htab_map_get_used_elem(struct bpf_map *map)
+{
+	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+
+	/* The elem count may temporarily go beyond the max after
+	 * inc_elem_count() but before dec_elem_count().
+	 */
+	if (htab->use_percpu_counter)
+		return min_t(u32, htab->map.max_entries,
+			     percpu_counter_sum(&htab->pcount) +
+			     atomic_read(&htab->count));
+	else
+		return min_t(u32, htab->map.max_entries,
+			     atomic_read(&htab->count));
+}
+
 BTF_ID_LIST_SINGLE(htab_map_btf_ids, struct, bpf_htab)
 const struct bpf_map_ops htab_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -2202,6 +2220,7 @@ const struct bpf_map_ops htab_map_ops = {
 	.map_seq_show_elem = htab_map_seq_show_elem,
 	.map_set_for_each_callback_args = map_set_for_each_callback_args,
 	.map_for_each_callback = bpf_for_each_hash_elem,
+	.map_get_used_elem = htab_map_get_used_elem,
 	BATCH_OPS(htab),
 	.map_btf_id = &htab_map_btf_ids[0],
 	.iter_seq_info = &iter_seq_info,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7b373a5e861f..ea4828bb22ac 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -4203,6 +4203,8 @@ static int bpf_map_get_info_by_fd(struct file *file,
 	info.map_flags = map->map_flags;
 	info.map_extra = map->map_extra;
 	memcpy(info.name, map->name, sizeof(map->name));
+	if (map->ops->map_get_used_elem)
+		info.used_entries = map->ops->map_get_used_elem(map);
 
 	if (map->btf) {
 		info.btf_id = btf_obj_id(map->btf);
--
Ho-Ren (Jack) Chuang