Hello all,
This patch series targets a long-standing BPF usability issue - the lack of general cross-compilation support - by enabling cross-endian usage of libbpf and cross-endian build targets for selftests/bpf. Use cases range from better BPF support for embedded systems based on e.g. big-endian MIPS, to more build/test options for s390x systems.
Initial development and testing used mips64, since this arch makes switching the build byte-order trivial and is thus very handy for A/B testing. However, it lacks some key features (bpf2bpf calls, kfuncs, etc.), making for poor selftests/bpf coverage.
Final testing uses a kernel and selftests/bpf cross-built from x86_64 to s390x, and runs the result under QEMU/s390x. That same configuration could also be used on kernel-patches/bpf CI for regression-testing endian support, or perhaps for load-sharing s390x builds across x86_64 systems.
This thread includes some background regarding testing on QEMU/s390x and the generally favourable results (3 failures running test_progs): https://lore.kernel.org/bpf/ZsEcsaa3juxxQBUf@kodidev-ubuntu/
Feedback and suggestions are welcome!
Best regards, Tony
Tony Ambardar (8):
  libbpf: Improve log message formatting
  libbpf: Fix header comment typos for BTF.ext
  libbpf: Fix output .symtab byte-order during linking
  libbpf: Support BTF.ext loading and output in either endianness
  libbpf: Support opening bpf objects of either endianness
  libbpf: Support linking bpf objects of either endianness
  libbpf: Support creating light skeleton of either endianness
  selftests/bpf: Support cross-endian building
 tools/lib/bpf/bpf_gen_internal.h     |   1 +
 tools/lib/bpf/btf.c                  | 167 +++++++++++++++++++++++--
 tools/lib/bpf/btf.h                  |   2 +
 tools/lib/bpf/btf_dump.c             |   2 +-
 tools/lib/bpf/btf_relocate.c         |   2 +-
 tools/lib/bpf/gen_loader.c           | 179 +++++++++++++++++++++------
 tools/lib/bpf/libbpf.c               |  26 +++-
 tools/lib/bpf/libbpf.map             |   2 +
 tools/lib/bpf/libbpf_internal.h      |  17 ++-
 tools/lib/bpf/linker.c               | 108 +++++++++++++---
 tools/lib/bpf/relo_core.c            |   2 +-
 tools/lib/bpf/skel_internal.h        |   3 +-
 tools/testing/selftests/bpf/Makefile |   7 +-
 13 files changed, 438 insertions(+), 80 deletions(-)
Fix missing newlines and extraneous terminal spaces in messages.
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/lib/bpf/btf.c          | 6 +++---
 tools/lib/bpf/btf_dump.c     | 2 +-
 tools/lib/bpf/btf_relocate.c | 2 +-
 tools/lib/bpf/libbpf.c       | 4 ++--
 tools/lib/bpf/relo_core.c    | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 32c00db3b91b..f5081de86ee0 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -2940,7 +2940,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
 	/* If no records, return failure now so .BTF.ext won't be used. */
 	if (!info_left) {
-		pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
+		pr_debug("%s section in .BTF.ext has no records\n", ext_sec->desc);
 		return -EINVAL;
 	}
@@ -3028,7 +3028,7 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
 	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
 	    data_size < hdr->hdr_len) {
-		pr_debug("BTF.ext header not found");
+		pr_debug("BTF.ext header not found\n");
 		return -EINVAL;
 	}
@@ -3290,7 +3290,7 @@ int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
 	d = btf_dedup_new(btf, opts);
 	if (IS_ERR(d)) {
-		pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
+		pr_debug("btf_dedup_new failed: %ld\n", PTR_ERR(d));
 		return libbpf_err(-EINVAL);
 	}
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 894860111ddb..25e7c44d9f95 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -1304,7 +1304,7 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
 			 * chain, restore stack, emit warning, and try to
 			 * proceed nevertheless
 			 */
-			pr_warn("not enough memory for decl stack:%d", err);
+			pr_warn("not enough memory for decl stack:%d\n", err);
 			d->decl_stack_cnt = stack_start;
 			return;
 		}
diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c
index 4f7399d85eab..b72f83e15156 100644
--- a/tools/lib/bpf/btf_relocate.c
+++ b/tools/lib/bpf/btf_relocate.c
@@ -428,7 +428,7 @@ static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i)
 		} else {
 			off = r->str_map[*str_off];
 			if (!off) {
-				pr_warn("string '%s' [offset %u] is not mapped to base BTF",
+				pr_warn("string '%s' [offset %u] is not mapped to base BTF\n",
 					btf__str_by_offset(r->btf, off), *str_off);
 				return -ENOENT;
 			}
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e55353887439..8a0a0c1e37e1 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -12753,7 +12753,7 @@ struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
 	}
 	if (prog->type != BPF_PROG_TYPE_EXT) {
-		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
+		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
 			prog->name);
 		return libbpf_err_ptr(-EINVAL);
 	}
@@ -13843,7 +13843,7 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
 		map_type = btf__type_by_id(btf, map_type_id);
 		if (!btf_is_datasec(map_type)) {
-			pr_warn("type for map '%1$s' is not a datasec: %2$s",
+			pr_warn("type for map '%1$s' is not a datasec: %2$s\n",
 				bpf_map__name(map), __btf_kind_str(btf_kind(map_type)));
 			return libbpf_err(-EINVAL);
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index 63a4d5ad12d1..7632e9d41827 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -1339,7 +1339,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
 						  cands->cands[i].id, cand_spec);
 		if (err < 0) {
 			bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
-			pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
+			pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n",
				prog_name, relo_idx, i, spec_buf, err);
			return err;
		}
Mention struct btf_ext_info_sec rather than non-existent btf_sec_func_info in BTF.ext struct documentation.
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/lib/bpf/libbpf_internal.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 408df59e0771..8cda511a1982 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -448,11 +448,11 @@ struct btf_ext_info {
  *
  * The func_info subsection layout:
  *   record size for struct bpf_func_info in the func_info subsection
- *   struct btf_sec_func_info for section #1
+ *   struct btf_ext_info_sec for section #1
  *   a list of bpf_func_info records for section #1
  *     where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
  *     but may not be identical
- *   struct btf_sec_func_info for section #2
+ *   struct btf_ext_info_sec for section #2
  *   a list of bpf_func_info records for section #2
  *   ......
 *
Object linking output data uses the default ELF_T_BYTE type for '.symtab' section data, which disables any libelf-based translation. Explicitly set the ELF_T_SYM type for output to restore libelf's byte-order conversion, noting that input '.symtab' data is already correctly translated.
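For background, libelf only byte-swaps output data when it knows a section's data type; ELF_T_BYTE data is written verbatim. A minimal sketch of the idea (illustrative only, not part of this patch; the helper and its arguments are hypothetical):

#include <libelf.h>

/* libelf translates typed data to the output ELF's byte order (EI_DATA)
 * on elf_update(); untyped ELF_T_BYTE data is copied as-is.
 */
static void emit_symtab(Elf_Scn *scn, Elf64_Sym *syms, size_t sym_cnt)
{
	Elf_Data *data = elf_newdata(scn);

	if (!data)
		return;
	data->d_buf = syms;		/* array of Elf64_Sym records */
	data->d_size = sym_cnt * sizeof(Elf64_Sym);
	data->d_type = ELF_T_SYM;	/* enable Elf64_Sym field translation */
	data->d_version = EV_CURRENT;
	/* with ELF_T_BYTE here, symbols would be written in host byte order */
}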
Fixes: faf6ed321cf6 ("libbpf: Add BPF static linker APIs")
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/lib/bpf/linker.c | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 9cd3d4109788..7489306cd6f7 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -396,6 +396,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
 		pr_warn_elf("failed to create SYMTAB data");
 		return -EINVAL;
 	}
+	/* Ensure libelf translates byte-order of symbol records */
+	sec->data->d_type = ELF_T_SYM;
 
 	str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
 	if (str_off < 0)
Support for handling BTF data of either endianness was added in [1], but did not include BTF.ext data for lack of use cases. Later, support for static linking [2] provided a use case, but this feature and later ones were restricted to native-endian usage.
Add support for BTF.ext handling in either endianness. Convert BTF.ext data to native endianness when read into memory for further processing, and support raw data access that restores the original byte-order for output.
Add new API functions btf_ext__endianness() and btf_ext__set_endianness() for querying and setting byte-order, mirroring those that already exist for BTF data.
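As an illustration of the new API (a sketch only, not from this patch; the helper below is hypothetical), a BTF-processing tool could force BTF.ext output to a particular byte order:

#include <bpf/btf.h>

/* Return big-endian raw data for a parsed .BTF.ext, e.g. when producing
 * objects for an s390x target. The returned buffer is owned by 'ext' and
 * remains valid until btf_ext__free(ext).
 */
static const void *btf_ext_as_big_endian(struct btf_ext *ext, __u32 *size)
{
	if (btf_ext__set_endianness(ext, BTF_BIG_ENDIAN))
		return NULL;
	return btf_ext__raw_data(ext, size);	/* byte-swapped on demand */
}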
[1] commit 3289959b97ca ("libbpf: Support BTF loading and raw data output in both endianness")
[2] commit 8fd27bf69b86 ("libbpf: Add BPF static linker BTF and BTF.ext support")
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/lib/bpf/btf.c             | 163 ++++++++++++++++++++++++++++++--
 tools/lib/bpf/btf.h             |   2 +
 tools/lib/bpf/libbpf.map        |   2 +
 tools/lib/bpf/libbpf_internal.h |   2 +
 4 files changed, 160 insertions(+), 9 deletions(-)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index f5081de86ee0..fba6988a7820 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -2884,6 +2884,52 @@ int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
 	return btf_commit_type(btf, sz);
 }
 
+/*
+ * Swap endianness of the info segment in a BTF.ext data section:
+ *   - requires BTF.ext header data in native byte order
+ *   - expects info record fields are 32-bit
+ */
+static int btf_ext_bswap_info(void *data, const __u32 data_size)
+{
+	const struct btf_ext_header *hdr = data;
+	__u32 info_size, sum_len, i, *p;
+
+	if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
+		pr_warn("BTF.ext initial header not found\n");
+		return -EINVAL;
+	}
+
+	if (data_size < hdr->hdr_len) {
+		pr_warn("BTF.ext header not found\n");
+		return -EINVAL;
+	}
+
+	if (hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
+		pr_warn("BTF.ext header missing func_info, line_info\n");
+		return -EINVAL;
+	}
+
+	sum_len = hdr->func_info_len + hdr->line_info_len;
+	if (hdr->hdr_len >= offsetofend(struct btf_ext_header, core_relo_len))
+		sum_len += hdr->core_relo_len;
+
+	info_size = data_size - hdr->hdr_len;
+	if (info_size != sum_len) {
+		pr_warn("BTF.ext info size mismatch with header data\n");
+		return -EINVAL;
+	}
+
+	if (info_size && info_size % sizeof(__u32)) {
+		pr_warn("BTF.ext info size not 32-bit multiple\n");
+		return -EINVAL;
+	}
+
+	p = data + hdr->hdr_len;
+	for (i = 0; i < info_size / sizeof(__u32); i++, p++)
+		*p = bswap_32(*p);
+	return 0;
+}
+
 struct btf_ext_sec_setup_param {
 	__u32 off;
 	__u32 len;
@@ -3022,24 +3068,56 @@ static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
 	return btf_ext_setup_info(btf_ext, &param);
 }
 
-static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
+/* Swap byte-order of BTF.ext header */
+static void btf_ext_bswap_hdr(struct btf_ext_header *h)
 {
-	const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
+	__u32 hdr_len = h->hdr_len; /* need native byte-order */
 
-	if (data_size < offsetofend(struct btf_ext_header, hdr_len) ||
-	    data_size < hdr->hdr_len) {
-		pr_debug("BTF.ext header not found\n");
+	if (h->magic == bswap_16(BTF_MAGIC))
+		hdr_len = bswap_32(hdr_len);
+
+	h->magic = bswap_16(h->magic);
+	h->hdr_len = bswap_32(h->hdr_len);
+	h->func_info_off = bswap_32(h->func_info_off);
+	h->func_info_len = bswap_32(h->func_info_len);
+	h->line_info_off = bswap_32(h->line_info_off);
+	h->line_info_len = bswap_32(h->line_info_len);
+
+	if (hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+		return;
+
+	h->core_relo_off = bswap_32(h->core_relo_off);
+	h->core_relo_len = bswap_32(h->core_relo_len);
+}
+
+static int btf_ext_parse_hdr(struct btf_ext *btf_ext)
+{
+	struct btf_ext_header *hdr = btf_ext->hdr;
+	__u32 hdr_len, data_size = btf_ext->data_size;
+
+	if (data_size < offsetofend(struct btf_ext_header, hdr_len)) {
+		pr_debug("BTF.ext initial header not found\n");
 		return -EINVAL;
 	}
 
+	hdr_len = hdr->hdr_len;
 	if (hdr->magic == bswap_16(BTF_MAGIC)) {
-		pr_warn("BTF.ext in non-native endianness is not supported\n");
-		return -ENOTSUP;
+		btf_ext->swapped_endian = true;
+		hdr_len = bswap_32(hdr_len);
 	} else if (hdr->magic != BTF_MAGIC) {
 		pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
 		return -EINVAL;
 	}
 
+	if (data_size < hdr_len) {
+		pr_debug("BTF.ext header not found\n");
+		return -EINVAL;
+	}
+
+	/* Maintain native byte-order in memory for introspection */
+	if (btf_ext->swapped_endian)
+		btf_ext_bswap_hdr(hdr);
+
 	if (hdr->version != BTF_VERSION) {
 		pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
 		return -ENOTSUP;
@@ -3066,6 +3144,7 @@ void btf_ext__free(struct btf_ext *btf_ext)
 	free(btf_ext->line_info.sec_idxs);
 	free(btf_ext->core_relo_info.sec_idxs);
 	free(btf_ext->data);
+	free(btf_ext->data_swapped);
 	free(btf_ext);
 }
 
@@ -3086,7 +3165,12 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
 	}
 	memcpy(btf_ext->data, data, size);
 
-	err = btf_ext_parse_hdr(btf_ext->data, size);
+	err = btf_ext_parse_hdr(btf_ext);
+	if (err)
+		goto done;
+
+	if (btf_ext->swapped_endian)
+		err = btf_ext_bswap_info(btf_ext->data, btf_ext->data_size);
 	if (err)
 		goto done;
 
@@ -3119,15 +3203,76 @@ struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
 	return btf_ext;
 }
 
+static void *btf_ext_raw_data(const struct btf_ext *btf_ext_ro, __u32 *size, bool swap_endian)
+{
+	struct btf_ext *btf_ext = (struct btf_ext *)btf_ext_ro;
+	const __u32 data_sz = btf_ext->data_size;
+	void *data;
+	int err;
+
+	data = swap_endian ? btf_ext->data_swapped : btf_ext->data;
+	if (data) {
+		*size = data_sz;
+		return data;
+	}
+
+	data = calloc(1, data_sz);
+	if (!data)
+		return NULL;
+	memcpy(data, btf_ext->data, data_sz);
+
+	if (swap_endian) {
+		err = btf_ext_bswap_info(data, data_sz);
+		if (err) {
+			free(data);
+			return NULL;
+		}
+		btf_ext_bswap_hdr(data);
+		btf_ext->data_swapped = data;
+	}
+
+	*size = data_sz;
+	return data;
+}
+
 const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
 {
+	__u32 data_sz;
+	void *data;
+
+	data = btf_ext_raw_data(btf_ext, &data_sz, btf_ext->swapped_endian);
+	if (!data)
+		return errno = ENOMEM, NULL;
+
 	*size = btf_ext->data_size;
-	return btf_ext->data;
+	return data;
 }
 
 __attribute__((alias("btf_ext__raw_data")))
 const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
 
+enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext)
+{
+	if (is_host_big_endian())
+		return btf_ext->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
+	else
+		return btf_ext->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
+}
+
+int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian)
+{
+	if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
+		return libbpf_err(-EINVAL);
+
+	btf_ext->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
+
+	if (!btf_ext->swapped_endian) {
+		free(btf_ext->data_swapped);
+		btf_ext->data_swapped = NULL;
+	}
+	return 0;
+}
+
 struct btf_dedup;
 
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b68d216837a9..8c4cbaba6194 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -167,6 +167,8 @@ LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
 LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
 LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
 LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
+LIBBPF_API enum btf_endianness btf_ext__endianness(const struct btf_ext *btf_ext);
+LIBBPF_API int btf_ext__set_endianness(struct btf_ext *btf_ext, enum btf_endianness endian);
 
 LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
 LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 8f0d9ea3b1b4..5c17632807b6 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -421,6 +421,8 @@ LIBBPF_1.5.0 {
 	global:
 		btf__distill_base;
 		btf__relocate;
+		btf_ext__endianness;
+		btf_ext__set_endianness;
 		bpf_map__autoattach;
 		bpf_map__set_autoattach;
 		bpf_program__attach_sockmap;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 8cda511a1982..6b0270c83537 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -484,6 +484,8 @@ struct btf_ext {
 		struct btf_ext_header *hdr;
 		void *data;
 	};
+	void *data_swapped;
+	bool swapped_endian;
 	struct btf_ext_info func_info;
 	struct btf_ext_info line_info;
 	struct btf_ext_info core_relo_info;
Allow bpf_object__open() to access files of either endianness, and convert included BPF programs to native byte-order in memory for introspection.
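For example (a sketch only, with a hypothetical helper and object path), a little-endian host can then open and inspect a big-endian object:

#include <stdio.h>
#include <errno.h>
#include <bpf/libbpf.h>

/* Open a (possibly non-native-endian) BPF object and list its programs;
 * insns are byte-swapped in memory so introspection works regardless of
 * the object's byte order.
 */
static int dump_prog_names(const char *path)
{
	struct bpf_object *obj = bpf_object__open(path);
	struct bpf_program *prog;

	if (!obj)
		return -errno;
	bpf_object__for_each_program(prog, obj)
		printf("prog: %s, %zu insns\n", bpf_program__name(prog),
		       bpf_program__insn_cnt(prog));
	bpf_object__close(obj);
	return 0;
}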
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/lib/bpf/libbpf.c          | 21 +++++++++++++++++++--
 tools/lib/bpf/libbpf_internal.h | 11 +++++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 8a0a0c1e37e1..a542031f4f73 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -940,6 +940,21 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
 	return 0;
 }
 
+static void bpf_object_bswap_progs(struct bpf_object *obj)
+{
+	struct bpf_program *prog = obj->programs;
+	struct bpf_insn *insn;
+	int p, i;
+
+	for (p = 0; p < obj->nr_programs; p++, prog++) {
+		insn = prog->insns;
+		for (i = 0; i < prog->insns_cnt; i++, insn++)
+			bpf_insn_bswap(insn);
+		pr_debug("prog '%s': converted %zu insns to native byteorder\n",
+			 prog->name, prog->insns_cnt);
+	}
+}
+
 static const struct btf_member *
 find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
 {
@@ -1610,7 +1625,6 @@ static int bpf_object__check_endianness(struct bpf_object *obj)
 #else
 # error "Unrecognized __BYTE_ORDER__"
 #endif
-	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
 	return -LIBBPF_ERRNO__ENDIAN;
 }
 
@@ -3953,6 +3967,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 		return -LIBBPF_ERRNO__FORMAT;
 	}
 
+	/* change BPF program insns to native endianness for introspection */
+	if (bpf_object__check_endianness(obj))
+		bpf_object_bswap_progs(obj);
+
 	/* sort BPF programs by section name and in-section instruction offset
 	 * for faster search
 	 */
@@ -7993,7 +8011,6 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
 	}
 
 	err = bpf_object__elf_init(obj);
-	err = err ? : bpf_object__check_endianness(obj);
 	err = err ? : bpf_object__elf_collect(obj);
 	err = err ? : bpf_object__collect_externs(obj);
 	err = err ? : bpf_object_fixup_btf(obj);
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 6b0270c83537..f53daa601c6f 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -11,6 +11,7 @@
 
 #include <stdlib.h>
 #include <limits.h>
+#include <byteswap.h>
 #include <errno.h>
 #include <linux/err.h>
 #include <fcntl.h>
@@ -590,6 +591,16 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
 	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
 }
 
+static inline void bpf_insn_bswap(struct bpf_insn *insn)
+{
+	/* dst_reg & src_reg nibbles */
+	__u8 *regs = (__u8 *)insn + offsetofend(struct bpf_insn, code);
+
+	*regs = (*regs >> 4) | (*regs << 4);
+	insn->off = bswap_16(insn->off);
+	insn->imm = bswap_32(insn->imm);
+}
+
 /* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
  * Original FD is not closed or altered in any other way.
  * Preserves original FD value, if it's invalid (negative).
On Wed, Aug 21, 2024 at 2:10 AM Tony Ambardar <tony.ambardar@gmail.com> wrote:
>
> +static inline void bpf_insn_bswap(struct bpf_insn *insn)
> +{
> +	/* dst_reg & src_reg nibbles */
> +	__u8 *regs = (__u8 *)insn + offsetofend(struct bpf_insn, code);
> +
> +	*regs = (*regs >> 4) | (*regs << 4);
> +	insn->off = bswap_16(insn->off);
> +	insn->imm = bswap_32(insn->imm);
> +}
This is really great! Thank you for working on it.
This idea was brought up a couple times, since folks want to compile bpf prog once, embed it into their user space binary, and auto adjust to target endianness. Cross compilation isn't important to them, but the ability to embed a single .o instead of two .o-s is a big win.
It's great that the above insn, elf and btf adjustments are working. Since endianness is encoded in elf what's the point of extra btf_ext__endianness libbpf api? Aren't elf and btf.ext supposed to be in the same endianness all the time?
On Wed, Aug 21, 2024 at 06:55:58PM -0700, Alexei Starovoitov wrote:
> On Wed, Aug 21, 2024 at 2:10 AM Tony Ambardar <tony.ambardar@gmail.com> wrote:
> >
> > +static inline void bpf_insn_bswap(struct bpf_insn *insn)
> > +{
> > +	/* dst_reg & src_reg nibbles */
> > +	__u8 *regs = (__u8 *)insn + offsetofend(struct bpf_insn, code);
> > +
> > +	*regs = (*regs >> 4) | (*regs << 4);
> > +	insn->off = bswap_16(insn->off);
> > +	insn->imm = bswap_32(insn->imm);
> > +}
>
> This is really great! Thank you for working on it.
Happy to help! The endian restrictions were a long-time annoyance for me.
This idea was brought up a couple times, since folks want to compile bpf prog once, embed it into their user space binary, and auto adjust to target endianness. Cross compilation isn't important to them, but the ability to embed a single .o instead of two .o-s is a big win.
Ah, interesting use case. I hadn't really considered that or tested it. I suppose .symtab and .rel* have ELF types so OK, .strtab doesn't matter, and now we have BTF/BTF.ext converters, so why not? Something like light skeleton might be a problem though, because the data blob is heterogeneous and would be hard to convert byte-order after writing.
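For concreteness, something like this sketch would then work from a single embedded object, whatever its byte-order (symbol names below are only illustrative, e.g. from 'ld -r -b binary'):

#include <bpf/libbpf.h>

extern const char _binary_prog_bpf_o_start[];
extern const char _binary_prog_bpf_o_end[];

/* Open one embedded BPF object regardless of its endianness; libbpf
 * byte-swaps programs/BTF in memory as needed after this series.
 */
static struct bpf_object *open_embedded(void)
{
	size_t sz = _binary_prog_bpf_o_end - _binary_prog_bpf_o_start;

	return bpf_object__open_mem(_binary_prog_bpf_o_start, sz, NULL);
}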
> It's great that the above insn, elf and btf adjustments are working. Since endianness is encoded in elf what's the point of extra btf_ext__endianness libbpf api? Aren't elf and btf.ext supposed to be in the same endianness all the time?
I implemented BTF.ext following the BTF endianness API example, which handles raw BTF, in-memory, and not just ELF object files. With BTF, we have API clients like pahole, but only internal usage so far for BTF.ext, and no notion of "raw" BTF.ext. I suppose exposing an API for btf_ext__endianness isn't strictly needed right now, but I can imagine BTF-processing clients using it. What are your thoughts, Andrii?
BTW, I just fixed a bug in my light skeleton code that made test_progs 'map_ptr' fail, so will be sending out a v2 patch.
Currently, I have only 2 unexpected test failures on s390x:
subtest_userns:PASS:socketpair 0 nsec
subtest_userns:PASS:fork 0 nsec
recvfd:PASS:recvmsg 0 nsec
recvfd:PASS:cmsg_null 0 nsec
recvfd:PASS:cmsg_len 0 nsec
recvfd:PASS:cmsg_level 0 nsec
recvfd:PASS:cmsg_type 0 nsec
parent:PASS:recv_bpffs_fd 0 nsec
materialize_bpffs_fd:PASS:fs_cfg_cmds 0 nsec
materialize_bpffs_fd:PASS:fs_cfg_maps 0 nsec
materialize_bpffs_fd:PASS:fs_cfg_progs 0 nsec
materialize_bpffs_fd:PASS:fs_cfg_attachs 0 nsec
parent:PASS:materialize_bpffs_fd 0 nsec
sendfd:PASS:sendmsg 0 nsec
parent:PASS:send_mnt_fd 0 nsec
recvfd:PASS:recvmsg 0 nsec
recvfd:PASS:cmsg_null 0 nsec
recvfd:PASS:cmsg_len 0 nsec
recvfd:PASS:cmsg_level 0 nsec
recvfd:PASS:cmsg_type 0 nsec
parent:PASS:recv_token_fd 0 nsec
parent:FAIL:waitpid_child unexpected error: 22 (errno 3)
#402/9 token/obj_priv_implicit_token_envvar:FAIL
and
libbpf: prog 'on_event': BPF program load failed: Bad address
libbpf: prog 'on_event': -- BEGIN PROG LOAD LOG --
The sequence of 8193 jumps is too complex.
verification time 2633000 usec
stack depth 360
processed 116096 insns (limit 1000000) max_states_per_insn 1 total_states 5061 peak_states 5061 mark_read 2540
-- END PROG LOAD LOG --
libbpf: prog 'on_event': failed to load: -14
libbpf: failed to load object 'pyperf600.bpf.o'
scale_test:FAIL:expect_success unexpected error: -14 (errno 14)
#525 verif_scale_pyperf600:FAIL
I'd appreciate any thoughts on troubleshooting these, and will continue looking into them.
Cheers, Tony
Allow static linking object files of either endianness, checking that input files have consistent byte-order, and setting output endianness from input.
Linking requires in-memory processing of programs, relocations, sections, etc. in native endianness, and output conversion to target byte-order. This is enabled by built-in ELF translation and the recent BTF/BTF.ext endianness functions. Also add local functions for swapping the byte-order of sections containing BPF insns.
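For illustration (a sketch only, not from this patch; file names are hypothetical), cross-endian linking then looks the same as native linking, with the output byte-order inherited from the first input:

#include <errno.h>
#include <bpf/libbpf.h>

/* Statically link two BPF objects; if both inputs are big-endian, the
 * output ELF/BTF/BTF.ext come out big-endian too, regardless of host
 * byte order. Mixed-endian inputs are rejected.
 */
static int link_objs(void)
{
	struct bpf_linker *linker = bpf_linker__new("out.bpf.o", NULL);
	int err;

	if (!linker)
		return -errno;
	err = bpf_linker__add_file(linker, "a.bpf.o", NULL);
	err = err ? : bpf_linker__add_file(linker, "b.bpf.o", NULL);
	err = err ? : bpf_linker__finalize(linker);
	bpf_linker__free(linker);
	return err;
}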
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/lib/bpf/linker.c | 106 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 90 insertions(+), 16 deletions(-)
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 7489306cd6f7..778b2b9d65a2 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -135,6 +135,7 @@ struct bpf_linker {
 	int fd;
 	Elf *elf;
 	Elf64_Ehdr *elf_hdr;
+	bool swapped_endian;
 
 	/* Output sections metadata */
 	struct dst_sec *secs;
@@ -324,13 +325,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
 
 	linker->elf_hdr->e_machine = EM_BPF;
 	linker->elf_hdr->e_type = ET_REL;
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
-#else
-#error "Unknown __BYTE_ORDER__"
-#endif
+	/* Set unknown ELF endianness, assign later from input files */
+	linker->elf_hdr->e_ident[EI_DATA] = ELFDATANONE;
 
 	/* STRTAB */
 	/* initialize strset with an empty string to conform to ELF */
@@ -541,19 +537,21 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
 				const struct bpf_linker_file_opts *opts,
 				struct src_obj *obj)
 {
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-	const int host_endianness = ELFDATA2LSB;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-	const int host_endianness = ELFDATA2MSB;
-#else
-#error "Unknown __BYTE_ORDER__"
-#endif
 	int err = 0;
 	Elf_Scn *scn;
 	Elf_Data *data;
 	Elf64_Ehdr *ehdr;
 	Elf64_Shdr *shdr;
 	struct src_sec *sec;
+	unsigned char obj_byteorder;
+	unsigned char *link_byteorder = &linker->elf_hdr->e_ident[EI_DATA];
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	const unsigned char host_byteorder = ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	const unsigned char host_byteorder = ELFDATA2MSB;
+#else
+#error "Unknown __BYTE_ORDER__"
+#endif
 
 	pr_debug("linker: adding object file '%s'...\n", filename);
 
@@ -579,11 +577,25 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
 		pr_warn_elf("failed to get ELF header for %s", filename);
 		return err;
 	}
-	if (ehdr->e_ident[EI_DATA] != host_endianness) {
+
+	/* Linker output endianness set by first input object */
+	obj_byteorder = ehdr->e_ident[EI_DATA];
+	if (obj_byteorder != ELFDATA2LSB && obj_byteorder != ELFDATA2MSB) {
 		err = -EOPNOTSUPP;
-		pr_warn_elf("unsupported byte order of ELF file %s", filename);
+		pr_warn("linker: unknown byte order of ELF file %s\n", filename);
 		return err;
 	}
+	if (*link_byteorder == ELFDATANONE) {
+		*link_byteorder = obj_byteorder;
+		linker->swapped_endian = obj_byteorder != host_byteorder;
+		pr_debug("linker: set %s-endian output byte order\n",
+			 obj_byteorder == ELFDATA2MSB ? "big" : "little");
+	} else if (*link_byteorder != obj_byteorder) {
+		err = -EOPNOTSUPP;
+		pr_warn("linker: byte order mismatch with ELF file %s\n", filename);
+		return err;
+	}
+
 	if (ehdr->e_type != ET_REL ||
 	    ehdr->e_machine != EM_BPF ||
 	    ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
@@ -1111,6 +1123,27 @@ static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec
 	return true;
 }
 
+static bool is_exec_sec(struct dst_sec *sec)
+{
+	if (!sec || sec->ephemeral)
+		return false;
+	return (sec->shdr->sh_type == SHT_PROGBITS) &&
+	       (sec->shdr->sh_flags & SHF_EXECINSTR);
+}
+
+static int exec_sec_bswap(void *raw_data, int size)
+{
+	const int insn_cnt = size / sizeof(struct bpf_insn);
+	struct bpf_insn *insn = raw_data;
+	int i;
+
+	if (size % sizeof(struct bpf_insn))
+		return -EINVAL;
+	for (i = 0; i < insn_cnt; i++, insn++)
+		bpf_insn_bswap(insn);
+	return 0;
+}
+
 static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
 {
 	void *tmp;
@@ -1170,6 +1203,16 @@ static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src
 		memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
 		/* now copy src data at a properly aligned offset */
 		memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);
+
+		/* convert added bpf insns to native byte-order */
+		if (linker->swapped_endian && is_exec_sec(dst)) {
+			err = exec_sec_bswap(dst->raw_data + dst_align_sz,
+					     src->shdr->sh_size);
+			if (err) {
+				pr_warn("%s: error changing insns endianness\n", __func__);
+				return err;
+			}
+		}
 	}
 
 	dst->sec_sz = dst_final_sz;
@@ -2630,6 +2673,14 @@ int bpf_linker__finalize(struct bpf_linker *linker)
 		if (!sec->scn)
 			continue;
 
+		/* restore sections with bpf insns to target byte-order */
+		if (linker->swapped_endian && is_exec_sec(sec)) {
+			err = exec_sec_bswap(sec->raw_data, sec->sec_sz);
+			if (err) {
+				pr_warn("error finalizing insns endianness\n");
+				return libbpf_err(err);
+			}
+		}
 		sec->data->d_buf = sec->raw_data;
 	}
 
@@ -2696,6 +2747,13 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
 	return 0;
 }
 
+static inline enum btf_endianness
+linker_btf_endianness(const struct bpf_linker *linker)
+{
+	unsigned char byteorder = linker->elf_hdr->e_ident[EI_DATA];
+	return byteorder == ELFDATA2MSB ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
+}
+
 static int finalize_btf(struct bpf_linker *linker)
 {
 	LIBBPF_OPTS(btf_dedup_opts, opts);
@@ -2742,6 +2800,22 @@ static int finalize_btf(struct bpf_linker *linker)
 		return err;
 	}
 
+	/* Set .BTF and .BTF.ext output byte order */
+	err = btf__set_endianness(linker->btf,
+				  linker_btf_endianness(linker));
+	if (err) {
+		pr_warn("failed to set .BTF output endianness: %d\n", err);
+		return err;
+	}
+	if (linker->btf_ext) {
+		err = btf_ext__set_endianness(linker->btf_ext,
+					      linker_btf_endianness(linker));
+		if (err) {
+			pr_warn("failed to set .BTF.ext output endianness: %d\n", err);
+			return err;
+		}
+	}
+
 	/* Emit .BTF section */
 	raw_data = btf__raw_data(linker->btf, &raw_sz);
 	if (!raw_data)
Track target endianness in 'struct bpf_gen' and process in-memory data in native byte-order, but on finalization convert the embedded loader BPF insns to target endianness.
The light skeleton also includes a target-accessed data blob which is heterogeneous and thus difficult to convert to target byte-order on finalization. Add support functions to convert data to target endianness as it is added to the blob.
Also add debug logging for data blob structure details and skeleton loading.
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/lib/bpf/bpf_gen_internal.h |   1 +
 tools/lib/bpf/gen_loader.c       | 179 ++++++++++++++++++++++++-------
 tools/lib/bpf/libbpf.c           |   1 +
 tools/lib/bpf/skel_internal.h    |   3 +-
 4 files changed, 143 insertions(+), 41 deletions(-)
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
index fdf44403ff36..6ff963a491d9 100644
--- a/tools/lib/bpf/bpf_gen_internal.h
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -34,6 +34,7 @@ struct bpf_gen {
 	void *data_cur;
 	void *insn_start;
 	void *insn_cur;
+	bool swapped_endian;
 	ssize_t cleanup_label;
 	__u32 nr_progs;
 	__u32 nr_maps;
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
index cf3323fd47b8..d9d396946977 100644
--- a/tools/lib/bpf/gen_loader.c
+++ b/tools/lib/bpf/gen_loader.c
@@ -401,6 +401,15 @@ int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
 		opts->insns_sz = gen->insn_cur - gen->insn_start;
 		opts->data = gen->data_start;
 		opts->data_sz = gen->data_cur - gen->data_start;
+
+		/* use target endianness for embedded loader */
+		if (gen->swapped_endian) {
+			struct bpf_insn *insn = (struct bpf_insn *)opts->insns;
+			int insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
+
+			for (i = 0; i < insn_cnt; i++)
+				bpf_insn_bswap(insn++);
+		}
 	}
 	return gen->error;
 }
@@ -414,6 +423,33 @@ void bpf_gen__free(struct bpf_gen *gen)
 	free(gen);
 }
 
+/*
+ * Fields of bpf_attr are set to values in native byte-order before being
+ * written to the target-bound data blob, and may need endian conversion.
+ * This macro allows setting the correct value in situ and is simpler than
+ * writing a separate converter for *all fields* of *all records* included
+ * in union bpf_attr.
+ */
+#define move_tgt_endian(lval, rval) {				\
+	if (!gen->swapped_endian)				\
+		lval = (rval);					\
+	else							\
+		switch (sizeof(lval)) {				\
+		case 2:						\
+			lval = bswap_16(rval);			\
+			break;					\
+		case 4:						\
+			lval = bswap_32(rval);			\
+			break;					\
+		case 8:						\
+			lval = bswap_64(rval);			\
+			break;					\
+		default:					\
+			lval = (rval);				\
+			pr_warn("unsupported bswap size!\n");	\
+		}						\
+	}
+
 void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
 		       __u32 btf_raw_size)
 {
@@ -422,11 +458,13 @@ void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
 	union bpf_attr attr;
 
 	memset(&attr, 0, attr_size);
-	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
 	btf_data = add_data(gen, btf_raw_data, btf_raw_size);
+	pr_debug("gen: load_btf: off %d size %d\n", btf_data, btf_raw_size);
 
-	attr.btf_size = btf_raw_size;
+	move_tgt_endian(attr.btf_size, btf_raw_size);
 	btf_load_attr = add_data(gen, &attr, attr_size);
+	pr_debug("gen: load_btf: btf_load_attr: off %d size %d\n",
+		 btf_load_attr, attr_size);
 
 	/* populate union bpf_attr with user provided log details */
 	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
@@ -457,23 +495,25 @@ void bpf_gen__map_create(struct bpf_gen *gen,
 	union bpf_attr attr;
 
 	memset(&attr, 0, attr_size);
-	attr.map_type = map_type;
-	attr.key_size = key_size;
-	attr.value_size = value_size;
-	attr.map_flags = map_attr->map_flags;
-	attr.map_extra = map_attr->map_extra;
+	move_tgt_endian(attr.map_type, map_type);
+	move_tgt_endian(attr.key_size, key_size);
+	move_tgt_endian(attr.value_size, value_size);
+	move_tgt_endian(attr.map_flags, map_attr->map_flags);
+	move_tgt_endian(attr.map_extra, map_attr->map_extra);
 	if (map_name)
 		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
-	attr.numa_node = map_attr->numa_node;
-	attr.map_ifindex = map_attr->map_ifindex;
-	attr.max_entries = max_entries;
-	attr.btf_key_type_id = map_attr->btf_key_type_id;
-	attr.btf_value_type_id = map_attr->btf_value_type_id;
+	move_tgt_endian(attr.numa_node, map_attr->numa_node);
+	move_tgt_endian(attr.map_ifindex, map_attr->map_ifindex);
+	move_tgt_endian(attr.max_entries, max_entries);
+	move_tgt_endian(attr.btf_key_type_id, map_attr->btf_key_type_id);
+	move_tgt_endian(attr.btf_value_type_id, map_attr->btf_value_type_id);
 
 	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
-		 attr.map_name, map_idx, map_type, attr.btf_value_type_id);
+		 attr.map_name, map_idx, map_type, map_attr->btf_value_type_id);
 
 	map_create_attr = add_data(gen, &attr, attr_size);
+	pr_debug("gen: map_create: map_create_attr: off %d size %d\n",
+		 map_create_attr, attr_size);
 	if (attr.btf_value_type_id)
 		/* populate union bpf_attr with btf_fd saved in the stack earlier */
 		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
@@ -784,12 +824,12 @@ static void emit_relo_ksym_typeless(struct bpf_gen *gen,
 	emit_ksym_relo_log(gen, relo, kdesc->ref);
 }
 
-static __u32 src_reg_mask(void)
+static __u32 src_reg_mask(struct bpf_gen *gen)
 {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-	return 0x0f; /* src_reg,dst_reg,... */
-#elif defined(__BIG_ENDIAN_BITFIELD)
-	return 0xf0; /* dst_reg,src_reg,... */
+#if defined(__LITTLE_ENDIAN_BITFIELD) /* src_reg,dst_reg,... */
+	return gen->swapped_endian ? 0xf0 : 0x0f;
+#elif defined(__BIG_ENDIAN_BITFIELD) /* dst_reg,src_reg,... */
+	return gen->swapped_endian ? 0x0f : 0xf0;
 #else
 #error "Unsupported bit endianness, cannot proceed"
 #endif
@@ -840,7 +880,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
 	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
 clear_src_reg:
 	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
-	reg_mask = src_reg_mask();
+	reg_mask = src_reg_mask(gen);
 	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
 	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
 	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
@@ -931,11 +971,34 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
 	cleanup_core_relo(gen);
 }
 
+/* Convert func, line, and core relo info records to target endianness,
+ * checking the blob size is consistent with 32-bit fields.
+ */
+static void info_blob_bswap(struct bpf_gen *gen, int info_off, __u32 size)
+{
+	__u32 *field = gen->data_start + info_off;
+	int i, cnt = size / sizeof(__u32);
+
+	if (size && size % sizeof(__u32)) {
+		pr_warn("info records not using 32-bit fields!\n");
+		return;
+	}
+	if (gen->swapped_endian)
+		for (i = 0; i < cnt; i++, field++)
+			*field = bswap_32(*field);
+}
+
 void bpf_gen__prog_load(struct bpf_gen *gen,
 			enum bpf_prog_type prog_type, const char *prog_name,
 			const char *license, struct bpf_insn *insns, size_t insn_cnt,
 			struct bpf_prog_load_opts *load_attr, int prog_idx)
 {
+	int func_info_tot_sz = load_attr->func_info_cnt *
+			       load_attr->func_info_rec_size;
+	int line_info_tot_sz = load_attr->line_info_cnt *
+			       load_attr->line_info_rec_size;
+	int core_relo_tot_sz = gen->core_relo_cnt *
+			       sizeof(struct bpf_core_relo);
 	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
 	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
 	union bpf_attr attr;
@@ -947,32 +1010,60 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
 	license_off = add_data(gen, license, strlen(license) + 1);
 	/* add insns to blob of bytes */
 	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
+	pr_debug("gen: prog_load: license off %d insn off %d\n",
+		 license_off, insns_off);
 
-	attr.prog_type = prog_type;
-	attr.expected_attach_type = load_attr->expected_attach_type;
-	attr.attach_btf_id = load_attr->attach_btf_id;
-	attr.prog_ifindex = load_attr->prog_ifindex;
-	attr.kern_version = 0;
-	attr.insn_cnt = (__u32)insn_cnt;
-	attr.prog_flags = load_attr->prog_flags;
-
-	attr.func_info_rec_size = load_attr->func_info_rec_size;
-	attr.func_info_cnt = load_attr->func_info_cnt;
-	func_info = add_data(gen, load_attr->func_info,
-			     attr.func_info_cnt * attr.func_info_rec_size);
+	/* convert blob insns to target endianness */
+	if (gen->swapped_endian) {
+		struct bpf_insn *insn = gen->data_start + insns_off;
+		int i;
 
-	attr.line_info_rec_size = load_attr->line_info_rec_size;
-	attr.line_info_cnt = load_attr->line_info_cnt;
-	line_info = add_data(gen, load_attr->line_info,
-			     attr.line_info_cnt * attr.line_info_rec_size);
+		for (i = 0; i < insn_cnt; i++, insn++)
+			bpf_insn_bswap(insn);
+	}
 
-	attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
-	attr.core_relo_cnt = gen->core_relo_cnt;
-	core_relos = add_data(gen, gen->core_relos,
-			      attr.core_relo_cnt * attr.core_relo_rec_size);
+	move_tgt_endian(attr.prog_type, prog_type);
+	move_tgt_endian(attr.expected_attach_type, load_attr->expected_attach_type);
+	move_tgt_endian(attr.attach_btf_id, load_attr->attach_btf_id);
+	move_tgt_endian(attr.prog_ifindex, load_attr->prog_ifindex);
+	attr.kern_version = 0;
+	move_tgt_endian(attr.insn_cnt, (__u32)insn_cnt);
+	move_tgt_endian(attr.prog_flags, load_attr->prog_flags);
+
+	move_tgt_endian(attr.func_info_rec_size, load_attr->func_info_rec_size);
+	move_tgt_endian(attr.func_info_cnt, load_attr->func_info_cnt);
+	func_info = add_data(gen, load_attr->func_info, func_info_tot_sz);
+	pr_debug("gen: prog_load: func_info: off %d cnt %d rec size %d\n",
+		 func_info, load_attr->func_info_cnt,
+		 load_attr->func_info_rec_size);
+
+	/* convert info blob fields to target endianness */
+	info_blob_bswap(gen, func_info, func_info_tot_sz);
+
+	move_tgt_endian(attr.line_info_rec_size, load_attr->line_info_rec_size);
+	move_tgt_endian(attr.line_info_cnt, load_attr->line_info_cnt);
+	line_info = add_data(gen, load_attr->line_info, line_info_tot_sz);
+	pr_debug("gen: prog_load: line_info: off %d cnt %d rec size %d\n",
+		 line_info, load_attr->line_info_cnt,
+		 load_attr->line_info_rec_size);
+
+	/* convert info blob fields to target endianness */
+	info_blob_bswap(gen, line_info, line_info_tot_sz);
+
+	move_tgt_endian(attr.core_relo_rec_size, sizeof(struct bpf_core_relo));
+	move_tgt_endian(attr.core_relo_cnt, gen->core_relo_cnt);
+	core_relos = add_data(gen, gen->core_relos, core_relo_tot_sz);
+	pr_debug("gen: prog_load: core_relos: off %d cnt %d rec size %zd\n",
+		 core_relos, gen->core_relo_cnt,
+		 sizeof(struct bpf_core_relo));
+
+	/* convert info blob fields to target endianness */
+	info_blob_bswap(gen, core_relos, core_relo_tot_sz);
 
 	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
 	prog_load_attr = add_data(gen, &attr, attr_size);
+	pr_debug("gen: prog_load: prog_load_attr: off %d size %d\n",
+		 prog_load_attr, attr_size);
 
 	/* populate union bpf_attr with a pointer to license */
 	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
@@ -1068,6 +1159,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
 	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
 
 	map_update_attr = add_data(gen, &attr, attr_size);
+	pr_debug("gen: map_update_elem: map_update_attr: off %d size %d\n",
+		 map_update_attr, attr_size);
 	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
 		       blob_fd_array_off(gen, map_idx));
 	emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1084,14 +1177,18 @@ void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slo
 	int attr_size = offsetofend(union bpf_attr, flags);
 	int map_update_attr, key;
 	union bpf_attr attr;
+	int tgt_slot;
 
 	memset(&attr, 0, attr_size);
 	pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
 		 outer_map_idx, slot, inner_map_idx);
 
-	key = add_data(gen, &slot, sizeof(slot));
+	move_tgt_endian(tgt_slot, slot);
+	key = add_data(gen, &tgt_slot, sizeof(tgt_slot));
 
 	map_update_attr = add_data(gen, &attr, attr_size);
+	pr_debug("gen: populate_outer_map: map_update_attr: off %d size %d\n",
+		 map_update_attr, attr_size);
 	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
 		       blob_fd_array_off(gen, outer_map_idx));
 	emit_rel_store(gen, attr_field(map_update_attr, key), key);
@@ -1114,6 +1211,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
 	memset(&attr, 0, attr_size);
 	pr_debug("gen: map_freeze: idx %d\n", map_idx);
 	map_freeze_attr = add_data(gen, &attr, attr_size);
+	pr_debug("gen: map_freeze: map_update_attr: off %d size %d\n",
+		 map_freeze_attr, attr_size);
 	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
 		       blob_fd_array_off(gen, map_idx));
 	/* emit MAP_FREEZE command */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a542031f4f73..8b6c212eb9a3 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9106,6 +9106,7 @@ int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
 	if (!gen)
 		return -ENOMEM;
 	gen->opts = opts;
+	gen->swapped_endian = bpf_object__check_endianness(obj);
 	obj->gen_loader = gen;
 	return 0;
 }
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
index 1e82ab06c3eb..67e8477ecb5b 100644
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -351,10 +351,11 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
 	attr.test.ctx_size_in = opts->ctx->sz;
 	err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
 	if (err < 0 || (int)attr.test.retval < 0) {
-		opts->errstr = "failed to execute loader prog";
 		if (err < 0) {
+			opts->errstr = "failed to execute loader prog";
 			set_err;
 		} else {
+			opts->errstr = "error returned by loader prog";
 			err = (int)attr.test.retval;
 #ifndef __KERNEL__
 			errno = -err;
Update Makefile build rules to compile BPF programs with target endianness rather than host byte-order. With recent changes, this allows building the full selftests/bpf suite hosted on x86_64 and targeting s390x or mips64eb for example.
Signed-off-by: Tony Ambardar <tony.ambardar@gmail.com>
---
 tools/testing/selftests/bpf/Makefile | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 3b9f345f7909..69ec6960af42 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -402,6 +402,7 @@ endef
 IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
 			grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
 MENDIAN=$(if $(IS_LITTLE_ENDIAN),-mlittle-endian,-mbig-endian)
+BPF_TARGET_ENDIAN=$(if $(IS_LITTLE_ENDIAN),--target=bpfel,--target=bpfeb)
 
 ifneq ($(CROSS_COMPILE),)
 CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
@@ -429,17 +430,17 @@ $(OUTPUT)/cgroup_getset_retval_hooks.o: cgroup_getset_retval_hooks.h
 # $4 - binary name
 define CLANG_BPF_BUILD_RULE
 	$(call msg,CLNG-BPF,$4,$2)
-	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v3 -o $2
+	$(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v3 -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
 define CLANG_NOALU32_BPF_BUILD_RULE
 	$(call msg,CLNG-BPF,$4,$2)
-	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v2 -o $2
+	$(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v2 -o $2
 endef
 # Similar to CLANG_BPF_BUILD_RULE, but with cpu-v4
 define CLANG_CPUV4_BPF_BUILD_RULE
 	$(call msg,CLNG-BPF,$4,$2)
-	$(Q)$(CLANG) $3 -O2 --target=bpf -c $1 -mcpu=v4 -o $2
+	$(Q)$(CLANG) $3 -O2 $(BPF_TARGET_ENDIAN) -c $1 -mcpu=v4 -o $2
 endef
 # Build BPF object using GCC
 define GCC_BPF_BUILD_RULE