On Fri, May 2, 2025 at 3:20 AM Lorenz Bauer <lmb@isovalent.com> wrote:
User space needs access to kernel BTF for many modern features of BPF. Right now each process needs to read the BTF blob either in pieces or as a whole. Allow mmapping the sysfs file so that processes can directly access the memory allocated for it in the kernel.
Signed-off-by: Lorenz Bauer <lmb@isovalent.com>
 include/asm-generic/vmlinux.lds.h |  3 ++-
 kernel/bpf/sysfs_btf.c            | 36 ++++++++++++++++++++++++++++++++++--
 2 files changed, 36 insertions(+), 3 deletions(-)
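For illustration, the user-space side this enables looks roughly like the sketch below (against the existing /sys/kernel/btf/vmlinux file, error handling trimmed; a read()-based fallback is still needed on kernels without this patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	void *btf;
	int fd;

	fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);
	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;

	/* Read-only and private: the handler below rejects writable,
	 * executable and shared (VM_MAYSHARE) mappings. */
	btf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (btf == MAP_FAILED)
		return 1;

	/* Raw BTF starts with BTF_MAGIC (0xeB9F). */
	printf("BTF magic: 0x%04x\n", *(unsigned short *)btf);

	munmap(btf, st.st_size);
	close(fd);
	return 0;
}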
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 58a635a6d5bdf0c53c267c2a3d21a5ed8678ce73..1750390735fac7637cc4d2fa05f96cb2a36aa448 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -667,10 +667,11 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
  */
 #ifdef CONFIG_DEBUG_INFO_BTF
 #define BTF								\
+	. = ALIGN(PAGE_SIZE);						\
 	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {				\
 		BOUNDED_SECTION_BY(.BTF, _BTF)				\
 	}								\
-	. = ALIGN(4);							\
+	. = ALIGN(PAGE_SIZE);						\
 	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {			\
 		*(.BTF_ids)						\
 	}
diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index 81d6cf90584a7157929c50f62a5c6862e7a3d081..f4b59b1c2e5b11ffffa80662ad39334c730019ee 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -7,18 +7,50 @@
 #include <linux/kobject.h>
 #include <linux/init.h>
 #include <linux/sysfs.h>
+#include <linux/mm.h>
+#include <linux/io.h>
 
 /* See scripts/link-vmlinux.sh, gen_btf() func for details */
 extern char __start_BTF[];
 extern char __stop_BTF[];
 
+struct kobject *btf_kobj;
+
+static int btf_vmlinux_mmap(struct file *filp, struct kobject *kobj,
+			    const struct bin_attribute *attr,
+			    struct vm_area_struct *vma)
+{
+	phys_addr_t start = virt_to_phys(__start_BTF);
+	size_t btf_size = __stop_BTF - __start_BTF;
+	size_t vm_size = vma->vm_end - vma->vm_start;
+	unsigned long pfn = start >> PAGE_SHIFT;
+	unsigned long pages = PAGE_ALIGN(btf_size) >> PAGE_SHIFT;
+
+	if (kobj != btf_kobj)
+		return -EINVAL;
+
+	if (vma->vm_pgoff)
+		return -EINVAL;
+
+	if (vma->vm_flags & (VM_WRITE|VM_EXEC|VM_MAYSHARE))
+		return -EACCES;
+
+	if (pfn + pages < pfn)
+		return -EINVAL;
+
+	if (vm_size >> PAGE_SHIFT > pages)
+		return -EINVAL;
+
+	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC|VM_MAYWRITE);
+	return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
remap_pfn_range() should be avoided; see the big comment in map_range() in kernel/events/core.c.
The following seems to work:

diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c
index f4b59b1c2e5b..7d0fd28070d8 100644
--- a/kernel/bpf/sysfs_btf.c
+++ b/kernel/bpf/sysfs_btf.c
@@ -20,13 +20,13 @@ static int btf_vmlinux_mmap(struct file *filp, struct kobject *kobj,
 			    const struct bin_attribute *attr,
 			    struct vm_area_struct *vma)
 {
-	phys_addr_t start = virt_to_phys(__start_BTF);
+	unsigned long addr = (unsigned long)__start_BTF;
 	size_t btf_size = __stop_BTF - __start_BTF;
 	size_t vm_size = vma->vm_end - vma->vm_start;
-	unsigned long pfn = start >> PAGE_SHIFT;
 	unsigned long pages = PAGE_ALIGN(btf_size) >> PAGE_SHIFT;
+	int i, err = 0;
 
-	if (kobj != btf_kobj)
+	if (kobj != btf_kobj || !pages)
 		return -EINVAL;
 
 	if (vma->vm_pgoff)
@@ -35,14 +35,17 @@ static int btf_vmlinux_mmap(struct file *filp, struct kobject *kobj,
 	if (vma->vm_flags & (VM_WRITE|VM_EXEC|VM_MAYSHARE))
 		return -EACCES;
 
-	if (pfn + pages < pfn)
-		return -EINVAL;
-
 	if (vm_size >> PAGE_SHIFT > pages)
 		return -EINVAL;
 
 	vm_flags_mod(vma, VM_DONTDUMP, VM_MAYEXEC|VM_MAYWRITE);
-	return remap_pfn_range(vma, vma->vm_start, pfn, vm_size, vma->vm_page_prot);
+
+	for (i = 0; i < pages && !err; i++, addr += PAGE_SIZE)
+		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+				     virt_to_page(addr));
+	if (err)
+		zap_page_range_single(vma, vma->vm_start, pages * PAGE_SIZE, NULL);
+	return err;
 }
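Note the error path: vm_insert_page() can fail partway through, so the partially populated range is torn down with zap_page_range_single() before the error is returned, rather than leaving a half-mapped VMA behind.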
Great that you added:

	/* Check padding is zeroed */
	for (int i = 0; i < trailing; i++) {
		if (((__u8 *)raw_data)[btf_size + i] != 0) {
			PRINT_FAIL("tail of BTF is not zero at page offset %d\n", i);
			goto cleanup;
		}
	}
but this part is puzzling:

	trailing = page_size - (btf_size % page_size) % page_size;

% binds tighter than -, so the second % page_size re-applies to (btf_size % page_size) and is a no-op; when btf_size is a multiple of page_size, trailing comes out as a whole page instead of 0.
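A standalone check, assuming page_size = 4096 (the fully parenthesized variant is my guess at the intended computation):

#include <stdio.h>

int main(void)
{
	long page_size = 4096;

	for (long btf_size = 8191; btf_size <= 8193; btf_size++) {
		/* as written: the second % page_size is a no-op */
		long current = page_size - (btf_size % page_size) % page_size;
		/* presumably intended: 0 when btf_size is page-aligned */
		long fixed = (page_size - btf_size % page_size) % page_size;

		printf("btf_size=%ld current=%ld fixed=%ld\n",
		       btf_size, current, fixed);
	}
	return 0;
}

The two agree except at btf_size = 8192, where the current expression reports a full page of padding that isn't there.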