Most map types define a create flag mask that is used to reject invalid map flags at map creation time. Add BPF_F_NO_CHARGE to all of these masks so the new flag can be enabled. For the map types that check map_flags inline or reject any flag (struct_ops, cpumap, xskmap), introduce a dedicated create flag mask as well.
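For example, userspace could then request the flag at map creation time. Below is a minimal, hypothetical sketch using libbpf's bpf_map_create(); it assumes BPF_F_NO_CHARGE is exported through the UAPI header by an earlier patch in this series, and the map name/sizes are only illustrative:

    #include <stdio.h>
    #include <bpf/bpf.h>

    int main(void)
    {
            /* Hypothetical: request BPF_F_NO_CHARGE via map_flags. */
            LIBBPF_OPTS(bpf_map_create_opts, opts,
                        .map_flags = BPF_F_NO_CHARGE);
            int fd;

            fd = bpf_map_create(BPF_MAP_TYPE_HASH, "no_charge_map",
                                sizeof(int), sizeof(long), 1024, &opts);
            if (fd < 0) {
                    /* Kernels without this flag reject the unknown bit. */
                    perror("bpf_map_create");
                    return 1;
            }
            printf("created map fd %d\n", fd);
            return 0;
    }

On kernels without this series the same call fails with EINVAL, since map_flags validation rejects unknown bits.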
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 kernel/bpf/arraymap.c          | 2 +-
 kernel/bpf/bloom_filter.c      | 4 ++--
 kernel/bpf/bpf_local_storage.c | 3 ++-
 kernel/bpf/bpf_struct_ops.c    | 5 ++++-
 kernel/bpf/cpumap.c            | 5 ++++-
 kernel/bpf/devmap.c            | 2 +-
 kernel/bpf/hashtab.c           | 2 +-
 kernel/bpf/local_storage.c     | 2 +-
 kernel/bpf/lpm_trie.c          | 2 +-
 kernel/bpf/queue_stack_maps.c  | 2 +-
 kernel/bpf/ringbuf.c           | 2 +-
 kernel/bpf/stackmap.c          | 2 +-
 net/core/sock_map.c            | 2 +-
 net/xdp/xskmap.c               | 5 ++++-
 14 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 7f145aefbff8..ac123747303c 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -16,7 +16,7 @@
 
 #define ARRAY_CREATE_FLAG_MASK \
 	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
-	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
+	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP | BPF_F_NO_CHARGE)
 
 static void bpf_array_free_percpu(struct bpf_array *array)
 {
diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c
index b141a1346f72..f8ebfb4831e5 100644
--- a/kernel/bpf/bloom_filter.c
+++ b/kernel/bpf/bloom_filter.c
@@ -8,8 +8,8 @@
 #include <linux/jhash.h>
 #include <linux/random.h>
 
-#define BLOOM_CREATE_FLAG_MASK \
-	(BPF_F_NUMA_NODE | BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK)
+#define BLOOM_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | \
+	BPF_F_ZERO_SEED | BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
 
 struct bpf_bloom_filter {
 	struct bpf_map map;
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index 092a1ac772d7..a92d3032fcde 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -15,7 +15,8 @@
 #include <linux/rcupdate_trace.h>
 #include <linux/rcupdate_wait.h>
 
-#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
+#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK \
+	(BPF_F_NO_PREALLOC | BPF_F_CLONE | BPF_F_NO_CHARGE)
 
 static struct bpf_local_storage_map_bucket *
 select_bucket(struct bpf_local_storage_map *smap,
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 21069dbe9138..09eb848e6d12 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -21,6 +21,8 @@ enum bpf_struct_ops_state {
 	refcount_t refcnt;					\
 	enum bpf_struct_ops_state state
 
+#define STRUCT_OPS_CREATE_FLAG_MASK (BPF_F_NO_CHARGE)
+
 struct bpf_struct_ops_value {
 	BPF_STRUCT_OPS_COMMON_VALUE;
 	char data[] ____cacheline_aligned_in_smp;
@@ -556,7 +558,8 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
 static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
-	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
+	    attr->map_flags & ~STRUCT_OPS_CREATE_FLAG_MASK ||
+	    !attr->btf_vmlinux_value_type_id)
 		return -EINVAL;
 	return 0;
 }
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 650e5d21f90d..201226fc652b 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -39,6 +39,9 @@
  */
 #define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
+
+#define CPU_MAP_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | BPF_F_NO_CHARGE)
+
 struct bpf_cpu_map_entry;
 struct bpf_cpu_map;
 
@@ -93,7 +96,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
 	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
 	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
-	    attr->map_flags & ~BPF_F_NUMA_NODE)
+	    attr->map_flags & ~CPU_MAP_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
 
 	cmap = kzalloc(sizeof(*cmap), GFP_USER | __GFP_ACCOUNT);
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 038f6d7a83e4..39bf8b521f27 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -50,7 +50,7 @@
 #include <trace/events/xdp.h>
 
 #define DEV_CREATE_FLAG_MASK \
-	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_NO_CHARGE)
 
 struct xdp_dev_bulk_queue {
 	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 65877967f414..5c6ec8780b09 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -16,7 +16,7 @@
 
 #define HTAB_CREATE_FLAG_MASK \
 	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
-	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)
+	 BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED | BPF_F_NO_CHARGE)
 
 #define BATCH_OPS(_name) \
 	.map_lookup_batch = \
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 497916060ac7..865766c240d6 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -15,7 +15,7 @@
 #include "../cgroup/cgroup-internal.h"
 
 #define LOCAL_STORAGE_CREATE_FLAG_MASK \
-	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
+	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
 
 struct bpf_cgroup_storage_map {
 	struct bpf_map map;
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 5763cc7ac4f1..f42edf613624 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -537,7 +537,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
 #define LPM_KEY_SIZE_MIN	LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
 
 #define LPM_CREATE_FLAG_MASK	(BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE | \
-				 BPF_F_ACCESS_MASK)
+				 BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
 
 static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index f9c734aaa990..c78eed4659ce 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -11,7 +11,7 @@
 #include "percpu_freelist.h"
 
 #define QUEUE_STACK_CREATE_FLAG_MASK \
-	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK)
+	(BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK | BPF_F_NO_CHARGE)
 
 struct bpf_queue_stack {
 	struct bpf_map map;
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 710ba9de12ce..88779f688679 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -11,7 +11,7 @@
 #include <linux/kmemleak.h>
 #include <uapi/linux/btf.h>
 
-#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE)
+#define RINGBUF_CREATE_FLAG_MASK (BPF_F_NUMA_NODE | BPF_F_NO_CHARGE)
 
 /* non-mmap()'able part of bpf_ringbuf (everything up to consumer page) */
 #define RINGBUF_PGOFF \
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 38bdfcd06f55..b2e7dc1d9f5a 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -14,7 +14,7 @@
 
 #define STACK_CREATE_FLAG_MASK \
 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \
-	 BPF_F_STACK_BUILD_ID)
+	 BPF_F_STACK_BUILD_ID | BPF_F_NO_CHARGE)
 
 struct stack_map_bucket {
 	struct pcpu_freelist_node fnode;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 2d213c4011db..7b0215bea413 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -22,7 +22,7 @@ struct bpf_stab {
 };
 
 #define SOCK_CREATE_FLAG_MASK \
-	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
+	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_NO_CHARGE)
 
 static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
 				struct bpf_prog *old, u32 which);
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 65b53fb3de13..10a5ae727bd5 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -12,6 +12,9 @@
#include "xsk.h"
+#define XSK_MAP_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_NO_CHARGE) + static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map, struct xdp_sock __rcu **map_entry) { @@ -68,7 +71,7 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
if (attr->max_entries == 0 || attr->key_size != 4 || attr->value_size != 4 || - attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)) + attr->map_flags & ~XSK_MAP_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL);
numa_node = bpf_map_attr_numa_node(attr);