The existing alternative_insn macro has some limitations that make it hard to work with. In particular, the fact that it takes instructions as its own macro arguments means it doesn't play very nicely with C pre-processor macros, because those arguments look like a string to the C pre-processor. Workarounds are (probably) possible but things start to look ugly.
Introduce an alternative set of macros that allows instructions to be presented to the assembler as normal and switch everything over to the new macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
Notes: To be honest these if_not/else/endif macros are simply more readable than the original macro and that might be enough to justify them on their own. However, below is an example that is needlessly hard to write without them because ICC_PMR_EL1 is a C pre-processor macro.
.macro disable_irq, tmp
	mov	\tmp, #ICC_PMR_EL1_MASKED
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	msr	daifset, #2
alternative_else
	msr_s	ICC_PMR_EL1, \tmp
alternative_endif
.endm
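For contrast, here is a sketch (editorial, not from the patch) of attempting the same replacement with the old macro. It falls foul of the problem described above: the C pre-processor does not expand macros inside string literals, so ICC_PMR_EL1 reaches the assembler unexpanded.

	// broken sketch: ICC_PMR_EL1 is never expanded inside the quoted string
	alternative_insn "msr daifset, #2", "msr_s ICC_PMR_EL1, \tmp", ARM64_HAS_SYSREG_GIC_CPUIF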
The new macros have received a fair degree of testing because I have based my (not published since March) pseudo-NMI patch set on them.
 arch/arm64/include/asm/alternative.h | 18 ++++++++++++------
 arch/arm64/kernel/entry.S            | 29 +++++++++++++----------------
 arch/arm64/kvm/hyp.S                 | 12 ++++++++++--
 arch/arm64/mm/cache.S                |  7 ++++++-
 4 files changed, 41 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c4057f..8c8cdfac7251 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -65,13 +65,19 @@ void free_alternatives_memory(void);
 	.byte \alt_len
 .endm
 
-.macro alternative_insn insn1 insn2 cap
-661:	\insn1
-662:	.pushsection .altinstructions, "a"
-	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
+.macro alternative_if_not cap
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
 	.popsection
-	.pushsection .altinstr_replacement, "ax"
-663:	\insn2
+661:
+.endm
+
+.macro alternative_else
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+.endm
+
+.macro alternative_endif
 664:	.popsection
 	.org	. - (664b-663b) + (662b-661b)
 	.org	. - (662b-661b) + (664b-663b)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a378668..be8a70d4028c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -122,26 +122,23 @@
 	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+	nop
+	nop
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG "nop ; nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
+	nop
+#endif
+alternative_else
+	tbz	x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrs	x29, contextidr_el1
+	msr	contextidr_el1, x29
 #else
-
-#define SEQUENCE_ORG "nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+	msr	contextidr_el1, xzr
 #endif
-
-	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+1:
+alternative_endif
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb14f428..10915aaf0b01 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d38c2dd..eb48d5df4a0f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@ __dma_clean_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
-- 
2.4.3
Hi Daniel,
On Fri, Jul 10, 2015 at 02:48:50PM +0100, Daniel Thompson wrote:
The existing alternative_insn macro has some limitations that make it hard to work with. In particular, the fact that it takes instructions as its own macro arguments means it doesn't play very nicely with C pre-processor macros, because those arguments look like a string to the C pre-processor. Workarounds are (probably) possible but things start to look ugly.
Introduce an alternative set of macros that allows instructions to be presented to the assembler as normal and switch everything over to the new macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
Notes: To be honest these if_not/else/endif macros are simply more readable than the original macro and that might be enough to justify them on their own. However, below is an example that is needlessly hard to write without them because ICC_PMR_EL1 is a C pre-processor macro.

.macro disable_irq, tmp
	mov	\tmp, #ICC_PMR_EL1_MASKED
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	msr	daifset, #2
alternative_else
	msr_s	ICC_PMR_EL1, \tmp
alternative_endif
.endm

The new macros have received a fair degree of testing because I have based my (not published since March) pseudo-NMI patch set on them.
 arch/arm64/include/asm/alternative.h | 18 ++++++++++++------
 arch/arm64/kernel/entry.S            | 29 +++++++++++++----------------
 arch/arm64/kvm/hyp.S                 | 12 ++++++++++--
 arch/arm64/mm/cache.S                |  7 ++++++-
 4 files changed, 41 insertions(+), 25 deletions(-)
After some consideration, I think I prefer your suggestion over what we currently have in mainline. However, there are a bunch of patches that are candidates for 4.3 which will conflict horribly with this.
Would you be able to:
(1) Split this up so that you have a patch introducing the new macro, then a patch converting entry.S and cache.S then a separate one for kvm/hyp.S?
(2) Keep alternative_insn around for the moment
(3) Once the dust has settled for 4.3, we can see how easy the old macro is to remove
Sound ok to you?
Cheers,
Will
On 16/07/15 19:19, Will Deacon wrote:
Notes: To be honest these if_not/else/endif macros are simply more readable than the original macro and that might be enough to justify them on their own. However, below is an example that is needlessly hard to write without them because ICC_PMR_EL1 is a C pre-processor macro.
.macro disable_irq, tmp
	mov	\tmp, #ICC_PMR_EL1_MASKED
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	msr	daifset, #2
alternative_else
	msr_s	ICC_PMR_EL1, \tmp
alternative_endif
.endm

The new macros have received a fair degree of testing because I have based my (not published since March) pseudo-NMI patch set on them.
After some consideration, I think I prefer your suggestion over what we currently have in mainline. However, there are a bunch of patches that are candidates for 4.3 which will conflict horribly with this.
Would you be able to:
(1) Split this up so that you have a patch introducing the new macro, then a patch converting entry.S and cache.S then a separate one for kvm/hyp.S?
(2) Keep alternative_insn around for the moment
(3) Once the dust has settled for 4.3, we can see how easy the old macro is to remove
Sound ok to you?
Absolutely fine.
I will get the split out patches posted soon.
Daniel.
Will: This is a split-out version of the previous patch, as requested. I have retained a patch at the end of the series to nix alternative_insn but there's no need to take this one for 4.3. Likewise I've fully split out all the switch-over patches so you can just drop them if they bring any merge trouble.
The existing alternative_insn macro has some limitations that make it hard to work with. In particular, the fact that it takes instructions as its own macro arguments means it doesn't play very nicely with C pre-processor macros, because those arguments look like a string to the C pre-processor. Workarounds are (probably) possible but things start to look ugly.
This patchset introduces new macros that allow instructions to be presented to the assembler as normal and overcome these limitations, together with patches to switch all existing users over to the new macros.
My view is that these if_not/else/endif macros are more readable than the original macro and that alone might be enough to justify them. However, below is a concrete example of an alternative sequence that is needlessly hard to write without them because ICC_PMR_EL1 is a C pre-processor macro.
.macro disable_irq, tmp
	mov	\tmp, #ICC_PMR_EL1_MASKED
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	msr	daifset, #2
alternative_else
	msr_s	ICC_PMR_EL1, \tmp
alternative_endif
.endm
The new macros have received a fair degree of testing because I have based my (not published since March) pseudo-NMI patch set on them.
v2:
* Split big patch out into atomized components (Will Deacon). To show where I would like to go I have retained a patch to remove alternative_insn() from the code base although, based on Will's email, I don't anticipate this patch being taken for 4.3.
* Add some comments to make clear the constraints imposed on alternative sequences.
Daniel Thompson (5):
  arm64: alternative: Provide if/else/endif assembler macros
  arm64: mm: Adopt new alternative assembler macros
  arm64: kernel: Adopt new alternative assembler macros
  arm64: kvm: Adopt new alternative assembler macros
  arm64: alternative: Remove alternative_insn macro
 arch/arm64/include/asm/alternative.h | 40 ++++++++++++++++++++++++++++++------
 arch/arm64/kernel/entry.S            | 29 ++++++++++++--------------
 arch/arm64/kvm/hyp.S                 | 12 +++++++++--
 arch/arm64/mm/cache.S                |  7 ++++++-
 4 files changed, 63 insertions(+), 25 deletions(-)
-- 
2.4.3
The existing alternative_insn macro has some limitations that make it hard to work with. In particular, the fact that it takes instructions as its own macro arguments means it doesn't play very nicely with C pre-processor macros, because those arguments look like a string to the C pre-processor. Workarounds are (probably) possible but things start to look ugly.
Introduce an alternative set of macros that allows instructions to be presented to the assembler as normal and switch everything over to the new macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/include/asm/alternative.h | 40 ++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c4057f..31b19ad18f7e 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -77,6 +77,46 @@ void free_alternatives_memory(void);
 	.org	. - (662b-661b) + (664b-663b)
 .endm
 
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this marco will be assembled and linked as
+ * normal. There are no restrictions on this code.
+ */
+.macro alternative_if_not cap
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+.endm
+
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not jump to local labels defined outside of the alternative
+ *    sequence.
+ */
+.macro alternative_else
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+.endm
+
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+.endm
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_ALTERNATIVE_H */
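An editorial aside on alternative_endif (not raised in the thread): the paired .org directives double as a build-time length check. Each one rewinds the location counter by the length of one sequence and advances it by the length of the other, so if the two sequences differ in size one of the expressions moves the counter backwards and GNU as rejects the assembly:

	.org	. - (664b-663b) + (662b-661b)	// fails if the replacement is longer
	.org	. - (662b-661b) + (664b-663b)	// fails if the default is longer

Both directives are needed because .org only refuses backward movement; a single check would silently pad in the other direction.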
Hi Daniel,
Couple of really small comments.
On Mon, Jul 20, 2015 at 04:10:00PM +0100, Daniel Thompson wrote:
The existing alternative_insn macro has some limitations that make it hard to work with. In particular, the fact that it takes instructions as its own macro arguments means it doesn't play very nicely with C pre-processor macros, because those arguments look like a string to the C pre-processor. Workarounds are (probably) possible but things start to look ugly.
Introduce an alternative set of macros that allows instructions to be presented to the assembler as normal and switch everything over to the new macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
 arch/arm64/include/asm/alternative.h | 40 ++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c4057f..31b19ad18f7e 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -77,6 +77,46 @@ void free_alternatives_memory(void);
 	.org	. - (662b-661b) + (664b-663b)
 .endm
 
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this marco will be assembled and linked as
+ * normal. There are no restrictions on this code.
s/marco/macro/
+ */
+.macro alternative_if_not cap
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+.endm
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not jump to local labels defined outside of the alternative
+ *    sequence.
Actually, we fix up the branch target during patching. What you can't do is jump into *another* alternative sequence.
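To make that concrete, an illustrative sketch (the capability name is a placeholder): a branch out of the replacement to a label beyond alternative_endif is relocated when the sequence is copied into place, so something like this works:

alternative_if_not ARM64_SOME_CAP
	nop
	nop
alternative_else
	tbz	x0, #0, 9f		// target lies outside the sequence;
	mov	x0, xzr			// the branch is fixed up at patch time
alternative_endif
9:

What cannot work is pointing such a branch at a label inside a different alternative sequence, since that code sits in the .altinstr_replacement section until it is patched in.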
Will
On 20/07/15 18:12, Will Deacon wrote:
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c4057f..31b19ad18f7e 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -77,6 +77,46 @@ void free_alternatives_memory(void);
 	.org	. - (662b-661b) + (664b-663b)
 .endm
 
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this marco will be assembled and linked as
+ * normal. There are no restrictions on this code.
s/marco/macro/
Will fix this.
+ */
+.macro alternative_if_not cap
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+.endm
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not jump to local labels defined outside of the alternative
+ *    sequence.
Actually, we fix up the branch target during patching. What you can't do is jump into *another* alternative sequence.
Ok. I will update this.
I saw that there must be branch target patching (due to the branches to __save_vgic_v3_state) but had got it into my head that branches to local labels confused the assembler. I guess I must have made an unrelated syntax error, because I have just done a few tests and can't reproduce anything like that.
Daniel.
Convert the dynamic patching for ARM64_WORKAROUND_CLEAN_CACHE over to the newly added alternative assembler macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/mm/cache.S | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d38c2dd..eb48d5df4a0f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@ __dma_clean_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
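As an editorial gloss on what the capability selects (the patch itself does not spell this out): on parts where ARM64_WORKAROUND_CLEAN_CACHE is detected, the clean-by-VA is promoted to a clean-and-invalidate, so the patched loop effectively runs:

1:	dc	civac, x0		// clean+invalidate substituted for clean
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b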
Convert the dynamic patching for ARM64_WORKAROUND_845719 over to the newly added alternative assembler macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/kernel/entry.S | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a378668..be8a70d4028c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -122,26 +122,23 @@
 	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+	nop
+	nop
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG "nop ; nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
+	nop
+#endif
+alternative_else
+	tbz	x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrs	x29, contextidr_el1
+	msr	contextidr_el1, x29
 #else
-
-#define SEQUENCE_ORG "nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+	msr	contextidr_el1, xzr
 #endif
-
-	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+1:
+alternative_endif
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
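Worth noting (an editorial sketch of the expansion with CONFIG_PID_IN_CONTEXTIDR disabled): the nops on the default side exist purely to satisfy the equal-length constraint, padding it to match the two-instruction workaround:

alternative_if_not ARM64_WORKAROUND_845719
	nop			// two nops pad the default path to the
	nop			// same length as the workaround below
alternative_else
	tbz	x22, #4, 1f
	msr	contextidr_el1, xzr
1:
alternative_endif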
Convert the dynamic patching for ARM64_HAS_SYSREG_GIC_CPUIF over to the newly added alternative assembler macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/kvm/hyp.S | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb14f428..10915aaf0b01 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
This macro has been superseded by the alternative_if_not/else/endif macro family and is no longer used anywhere in the kernel. Remove it.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/include/asm/alternative.h | 12 ------------
 1 file changed, 12 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 31b19ad18f7e..3267f0b9104a 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -65,18 +65,6 @@ void free_alternatives_memory(void);
 	.byte \alt_len
 .endm
 
-.macro alternative_insn insn1 insn2 cap
-661:	\insn1
-662:	.pushsection .altinstructions, "a"
-	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
-	.popsection
-	.pushsection .altinstr_replacement, "ax"
-663:	\insn2
-664:	.popsection
-	.org	. - (664b-663b) + (662b-661b)
-	.org	. - (662b-661b) + (664b-663b)
-.endm
-
 /*
  * Begin an alternative code sequence.
  *
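For reference, the style of invocation being retired looked like this (taken from the cache.S conversion earlier in the series):

	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE

with both instruction sequences packed into quoted strings, which is exactly the form that defeats C pre-processor expansion.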
Will: This is a split-out version of the previous patch, as requested. I have retained a patch at the end of the series to nix alternative_insn but there's no need to take this one for 4.3. Likewise I've fully split out all the switch-over patches so you can just drop them if they bring any merge trouble.
The existing alternative_insn macro has some limitations that make it hard to work with. In particular, the fact that it takes instructions as its own macro arguments means it doesn't play very nicely with C pre-processor macros, because those arguments look like a string to the C pre-processor. Workarounds are (probably) possible but things start to look ugly.
This patchset introduces new macros that allow instructions to be presented to the assembler as normal and overcome these limitations, together with patches to switch all existing users over to the new macros.
My view is that these if_not/else/endif macros are more readable than the original macro and that alone might be enough to justify them. However, below is a concrete example of an alternative sequence that is needlessly hard to write without them because ICC_PMR_EL1 is a C pre-processor macro.
.macro disable_irq, tmp
	mov	\tmp, #ICC_PMR_EL1_MASKED
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	msr	daifset, #2
alternative_else
	msr_s	ICC_PMR_EL1, \tmp
alternative_endif
.endm
The new macros have received a fair degree of testing because I have based my (not published since March) pseudo-NMI patch set on them.
v3:
* Corrected some technical and spelling errors in the comments (Will Deacon).
v2:
* Split big patch out into atomized components (Will Deacon). To show where I would like to go I have retained a patch to remove alternative_insn() from the code base although, based on Will's email, I don't anticipate this patch being taken for 4.3.
* Add some comments to make clear the constraints imposed on alternative sequences.
Daniel Thompson (5):
  arm64: alternative: Provide if/else/endif assembler macros
  arm64: mm: Adopt new alternative assembler macros
  arm64: kernel: Adopt new alternative assembler macros
  arm64: kvm: Adopt new alternative assembler macros
  arm64: alternative: Remove alternative_insn macro
 arch/arm64/include/asm/alternative.h | 41 ++++++++++++++++++++++++++++++------
 arch/arm64/kernel/entry.S            | 29 ++++++++++++-------------
 arch/arm64/kvm/hyp.S                 | 12 +++++++++--
 arch/arm64/mm/cache.S                |  7 +++++-
 4 files changed, 64 insertions(+), 25 deletions(-)
-- 
2.4.3
The existing alternative_insn macro has some limitations that make it hard to work with. In particular, the fact that it takes instructions as its own macro arguments means it doesn't play very nicely with C pre-processor macros, because those arguments look like a string to the C pre-processor. Workarounds are (probably) possible but things start to look ugly.
Introduce an alternative set of macros that allows instructions to be presented to the assembler as normal and switch everything over to the new macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/include/asm/alternative.h | 41 ++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c4057f..e86681ad0931 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -77,6 +77,47 @@ void free_alternatives_memory(void);
 	.org	. - (662b-661b) + (664b-663b)
 .endm
 
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this macro will be assembled and linked as
+ * normal. There are no restrictions on this code.
+ */
+.macro alternative_if_not cap
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+.endm
+
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ *    alternative sequence it is defined in (branches into an
+ *    alternative sequence are not fixed up).
+ */
+.macro alternative_else
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+.endm
+
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+.endm
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_ALTERNATIVE_H */
Convert the dynamic patching for ARM64_WORKAROUND_CLEAN_CACHE over to the newly added alternative assembler macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/mm/cache.S | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d38c2dd..eb48d5df4a0f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@ __dma_clean_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
Convert the dynamic patching for ARM64_WORKAROUND_845719 over to the newly added alternative assembler macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/kernel/entry.S | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a378668..be8a70d4028c 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -122,26 +122,23 @@
 	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+	nop
+	nop
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG "nop ; nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
+	nop
+#endif
+alternative_else
+	tbz	x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrs	x29, contextidr_el1
+	msr	contextidr_el1, x29
 #else
-
-#define SEQUENCE_ORG "nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+	msr	contextidr_el1, xzr
 #endif
-
-	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+1:
+alternative_endif
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
Convert the dynamic patching for ARM64_HAS_SYSREG_GIC_CPUIF over to the newly added alternative assembler macros.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/kvm/hyp.S | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb14f428..10915aaf0b01 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
Marc, Christoffer,
On Wed, Jul 22, 2015 at 12:21:04PM +0100, Daniel Thompson wrote:
Convert the dynamic patching for ARM64_HAS_SYSREG_GIC_CPUIF over to the newly added alternative assembler macros.
Do you mind if I take this via the arm64 tree? It won't apply in isolation and I'd expect conflicts to be small/trivial to resolve.
Cheers,
Will
 arch/arm64/kvm/hyp.S | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb14f428..10915aaf0b01 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
-- 
2.4.3
On 22/07/15 16:17, Will Deacon wrote:
Hi Will,
On Wed, Jul 22, 2015 at 12:21:04PM +0100, Daniel Thompson wrote:
Convert the dynamic patching for ARM64_HAS_SYSREG_GIC_CPUIF over to the newly added alternative assembler macros.
Do you mind if I take this via the arm64 tree? It won't apply in isolation and I'd expect conflicts to be small/trivial to resolve.
Looks perfectly sensible to me, and I'm happy for you to take this. I don't believe we have anything queued for 4.3 that would clash with this anyway.
Acked-by: Marc Zyngier marc.zyngier@arm.com
M.
This macro has been superseded by the alternative_if_not/else/endif macro family and is no longer used anywhere in the kernel. Remove it.
Signed-off-by: Daniel Thompson daniel.thompson@linaro.org
---
 arch/arm64/include/asm/alternative.h | 12 ------------
 1 file changed, 12 deletions(-)
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index e86681ad0931..81c7369eb358 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -65,18 +65,6 @@ void free_alternatives_memory(void);
 	.byte \alt_len
 .endm
 
-.macro alternative_insn insn1 insn2 cap
-661:	\insn1
-662:	.pushsection .altinstructions, "a"
-	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
-	.popsection
-	.pushsection .altinstr_replacement, "ax"
-663:	\insn2
-664:	.popsection
-	.org	. - (664b-663b) + (662b-661b)
-	.org	. - (662b-661b) + (664b-663b)
-.endm
-
 /*
  * Begin an alternative code sequence.
  *