The intermediate product value_size * num_possible_cpus() is evaluated in 32-bit arithmetic and only then promoted to 64 bits. On systems with large value_size and many possible CPUs this can overflow and lead to an underestimated memory usage.
Cast value_size to u64 before multiplying.
Found by Linux Verification Center (linuxtesting.org) with SVACE.
Fixes: 304849a27b34 ("bpf: hashtab memory usage")
Cc: stable@vger.kernel.org
Signed-off-by: Alexei Safin <a.safin@rosa.ru>
---
 kernel/bpf/hashtab.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 570e2f723144..7ad6b5137ba1 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2269,7 +2269,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
 		usage += htab->elem_size * num_entries;

 		if (percpu)
-			usage += value_size * num_possible_cpus() * num_entries;
+			usage += (u64)value_size * num_possible_cpus() * num_entries;
 		else if (!lru)
 			usage += sizeof(struct htab_elem *) * num_possible_cpus();
 	} else {
@@ -2281,7 +2281,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
 		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
 		if (percpu) {
 			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
-			usage += value_size * num_possible_cpus() * num_entries;
+			usage += (u64)value_size * num_possible_cpus() * num_entries;
 		}
 	}
 	return usage;