Le 06/02/2023 à 14:45, Sasha Levin a écrit :
> This is a note to let you know that I've just added the patch titled
>
> powerpc/bpf: Move common helpers into bpf_jit.h
>
> to the 5.10-stable tree which can be found at:
> http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=sum…
>
> The filename of the patch is:
> powerpc-bpf-move-common-helpers-into-bpf_jit.h.patch
> and it can be found in the queue-5.10 subdirectory.
>
> If you, or anyone else, feels it should not be added to the stable tree,
> please let <stable(a)vger.kernel.org> know about it.
>
This commit was part of a series that removed classical BPF and
implemented EBPF on powerpc32.
At that time PPC64 already had EBPF.
This commit makes some parts of the code common to PPC32 and PPC64,
because at the end of the series both use EBPF.
But I doubt this commit alone can be applied without first removing
classical BPF (commit 6944caad78fc ("powerpc/bpf: Remove classical BPF
support for PPC32")).
Christophe
>
>
> commit 76f74e69efb95e4add82e5b687e062f2c989739f
> Author: Christophe Leroy <christophe.leroy(a)csgroup.eu>
> Date: Mon Mar 22 16:37:48 2021 +0000
>
> powerpc/bpf: Move common helpers into bpf_jit.h
>
> [ Upstream commit f1b1583d5faa86cb3dcb7b740594868debad7c30 ]
>
> Move functions bpf_flush_icache(), bpf_is_seen_register() and
> bpf_set_seen_register() in order to reuse them in future
> bpf_jit_comp32.c
>
> Signed-off-by: Christophe Leroy <christophe.leroy(a)csgroup.eu>
> Signed-off-by: Michael Ellerman <mpe(a)ellerman.id.au>
> Link: https://lore.kernel.org/r/28e8d5a75e64807d7e9d39a4b52658755e259f8c.16164309…
> Stable-dep-of: 71f656a50176 ("bpf: Fix to preserve reg parent/live fields when copying range info")
> Signed-off-by: Sasha Levin <sashal(a)kernel.org>
>
> diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
> index 1a5b4da8a235..cd9aab6ec2c5 100644
> --- a/arch/powerpc/net/bpf_jit.h
> +++ b/arch/powerpc/net/bpf_jit.h
> @@ -117,6 +117,41 @@
> #define COND_LT (CR0_LT | COND_CMP_TRUE)
> #define COND_LE (CR0_GT | COND_CMP_FALSE)
>
> +#define SEEN_FUNC 0x1000 /* might call external helpers */
> +#define SEEN_STACK 0x2000 /* uses BPF stack */
> +#define SEEN_TAILCALL 0x4000 /* uses tail calls */
> +
> +struct codegen_context {
> + /*
> + * This is used to track register usage as well
> + * as calls to external helpers.
> + * - register usage is tracked with corresponding
> + * bits (r3-r10 and r27-r31)
> + * - rest of the bits can be used to track other
> + * things -- for now, we use bits 16 to 23
> + * encoded in SEEN_* macros above
> + */
> + unsigned int seen;
> + unsigned int idx;
> + unsigned int stack_size;
> +};
> +
> +static inline void bpf_flush_icache(void *start, void *end)
> +{
> + smp_wmb(); /* smp write barrier */
> + flush_icache_range((unsigned long)start, (unsigned long)end);
> +}
> +
> +static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
> +{
> + return ctx->seen & (1 << (31 - i));
> +}
> +
> +static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
> +{
> + ctx->seen |= 1 << (31 - i);
> +}
> +
> #endif
>
> #endif
> diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
> index 4d164e865b39..201b83bfa869 100644
> --- a/arch/powerpc/net/bpf_jit64.h
> +++ b/arch/powerpc/net/bpf_jit64.h
> @@ -86,25 +86,6 @@ static const int b2p[] = {
> } while(0)
> #define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STDU(r, base, i)); } while(0)
>
> -#define SEEN_FUNC 0x1000 /* might call external helpers */
> -#define SEEN_STACK 0x2000 /* uses BPF stack */
> -#define SEEN_TAILCALL 0x4000 /* uses tail calls */
> -
> -struct codegen_context {
> - /*
> - * This is used to track register usage as well
> - * as calls to external helpers.
> - * - register usage is tracked with corresponding
> - * bits (r3-r10 and r27-r31)
> - * - rest of the bits can be used to track other
> - * things -- for now, we use bits 16 to 23
> - * encoded in SEEN_* macros above
> - */
> - unsigned int seen;
> - unsigned int idx;
> - unsigned int stack_size;
> -};
> -
> #endif /* !__ASSEMBLY__ */
>
> #endif
> diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
> index 7da59ddc90dd..ebad2c79cd6f 100644
> --- a/arch/powerpc/net/bpf_jit_comp64.c
> +++ b/arch/powerpc/net/bpf_jit_comp64.c
> @@ -24,22 +24,6 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
> memset32(area, BREAKPOINT_INSTRUCTION, size/4);
> }
>
> -static inline void bpf_flush_icache(void *start, void *end)
> -{
> - smp_wmb();
> - flush_icache_range((unsigned long)start, (unsigned long)end);
> -}
> -
> -static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
> -{
> - return ctx->seen & (1 << (31 - i));
> -}
> -
> -static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
> -{
> - ctx->seen |= 1 << (31 - i);
> -}
> -
> static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
> {
> /*