When the allocating process is scheduled back and the mapped hw queue
has changed, fake one extra wake up on the previous queue to compensate
for the missed wake up, so other allocations on the previous queue
won't be starved.
This patch fixes a request allocation hang that is easily triggered
when nr_requests is very low.
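For reference, here is a minimal userspace model of the batched wake up
that makes the miss possible (C11 atomics; the names and batch size are
illustrative, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

#define WAKE_BATCH	4

/* Waiters are only woken when wait_cnt crosses zero, so a waiter that
 * migrates to another hw queue consumes a decrement without ever being
 * woken - the miss the patch below compensates for. */
static atomic_int wait_cnt = WAKE_BATCH;

static void queue_wake_up(void)
{
	/* mirrors atomic_dec_return() on ws->wait_cnt */
	int cnt = atomic_fetch_sub(&wait_cnt, 1) - 1;

	if (cnt <= 0) {
		int expected = cnt;

		/* The last decrementer below zero restores the batch; a
		 * racing reset makes the compare-exchange fail or be
		 * overwritten harmlessly, as with atomic_cmpxchg(). */
		atomic_compare_exchange_strong(&wait_cnt, &expected,
					       cnt + WAKE_BATCH);
		printf("wake up %d waiters\n", WAKE_BATCH);
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		queue_wake_up();	/* one call per freed tag */
	return 0;
}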
Cc: <stable@vger.kernel.org>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
V3:
- fix comments as suggested by Jens
- remove the wrapper as suggested by Omar
V2:
- fix build failure
block/blk-mq-tag.c | 12 ++++++++++++
include/linux/sbitmap.h | 7 +++++++
lib/sbitmap.c | 22 ++++++++++++----------
3 files changed, 31 insertions(+), 10 deletions(-)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 336dde07b230..a4e58fc28a06 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -134,6 +134,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
ws = bt_wait_ptr(bt, data->hctx);
drop_ctx = data->ctx == NULL;
do {
+ struct sbitmap_queue *bt_prev;
+
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
@@ -159,6 +161,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
if (data->ctx)
blk_mq_put_ctx(data->ctx);
+ bt_prev = bt;
io_schedule();
data->ctx = blk_mq_get_ctx(data->q);
@@ -170,6 +173,15 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
bt = &tags->bitmap_tags;
finish_wait(&ws->wait, &wait);
+
+ /*
+ * If the destination hw queue is changed, fake a wake up on
+ * the previous queue to compensate for the missed wake up,
+ * so other allocations on the previous queue won't starve.
+ */
+ if (bt != bt_prev)
+ sbitmap_queue_wake_up(bt_prev);
+
ws = bt_wait_ptr(bt, data->hctx);
} while (1);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 841585f6e5f2..bba9d80191b7 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -484,6 +484,13 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
/**
+ * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
+ * on a &struct sbitmap_queue.
+ * @sbq: Bitmap queue to wake up.
+ */
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+
+/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
* seq_file.
* @sbq: Bitmap queue to show.
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index e6a9c06ec70c..14e027a33ffa 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -335,8 +335,9 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
if (sbq->wake_batch != wake_batch) {
WRITE_ONCE(sbq->wake_batch, wake_batch);
/*
- * Pairs with the memory barrier in sbq_wake_up() to ensure that
- * the batch size is updated before the wait counts.
+ * Pairs with the memory barrier in sbitmap_queue_wake_up()
+ * to ensure that the batch size is updated before the wait
+ * counts.
*/
smp_mb__before_atomic();
for (i = 0; i < SBQ_WAIT_QUEUES; i++)
@@ -425,7 +426,7 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
return NULL;
}
-static void sbq_wake_up(struct sbitmap_queue *sbq)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
struct sbq_wait_state *ws;
unsigned int wake_batch;
@@ -454,23 +455,24 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
*/
smp_mb__before_atomic();
/*
- * If there are concurrent callers to sbq_wake_up(), the last
- * one to decrement the wait count below zero will bump it back
- * up. If there is a concurrent resize, the count reset will
- * either cause the cmpxchg to fail or overwrite after the
- * cmpxchg.
+ * If there are concurrent callers to sbitmap_queue_wake_up(),
+ * the last one to decrement the wait count below zero will
+ * bump it back up. If there is a concurrent resize, the count
+ * reset will either cause the cmpxchg to fail or overwrite
+ * after the cmpxchg.
*/
atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
sbq_index_atomic_inc(&sbq->wake_index);
wake_up_nr(&ws->wait, wake_batch);
}
}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
sbitmap_clear_bit_unlock(&sbq->sb, nr);
- sbq_wake_up(sbq);
+ sbitmap_queue_wake_up(sbq);
if (likely(!sbq->round_robin && nr < sbq->sb.depth))
*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
@@ -482,7 +484,7 @@ void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
/*
* Pairs with the memory barrier in set_current_state() like in
- * sbq_wake_up().
+ * sbitmap_queue_wake_up().
*/
smp_mb();
wake_index = atomic_read(&sbq->wake_index);
--
2.9.5
When the allocating process is scheduled back and the mapped hw queue
has changed, do one extra wake up on the original queue to compensate
for the missed wake up, so other allocations on the original queue
won't be starved.
This patch fixes a request allocation hang that is easily triggered
when nr_requests is very low.
Cc: <stable@vger.kernel.org>
Cc: Omar Sandoval <osandov@fb.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
V2:
fix build failure
block/blk-mq-tag.c | 13 +++++++++++++
include/linux/sbitmap.h | 7 +++++++
lib/sbitmap.c | 6 ++++++
3 files changed, 26 insertions(+)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 336dde07b230..77607f89d205 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -134,6 +134,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
ws = bt_wait_ptr(bt, data->hctx);
drop_ctx = data->ctx == NULL;
do {
+ struct sbitmap_queue *bt_orig;
+
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
@@ -159,6 +161,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
if (data->ctx)
blk_mq_put_ctx(data->ctx);
+ bt_orig = bt;
io_schedule();
data->ctx = blk_mq_get_ctx(data->q);
@@ -170,6 +173,16 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
bt = &tags->bitmap_tags;
finish_wait(&ws->wait, &wait);
+
+ /*
+ * If the destination hw queue is changed, wake up the
+ * original queue one extra time to compensate for the
+ * missed wake up, so other allocations on the original
+ * queue won't be starved.
+ */
+ if (bt != bt_orig)
+ sbitmap_queue_wake_up(bt_orig);
+
ws = bt_wait_ptr(bt, data->hctx);
} while (1);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 841585f6e5f2..b23f50355281 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -484,6 +484,13 @@ static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);
/**
+ * sbitmap_queue_wake_up() - Do one compensating wake up when the queue
+ * allocated from has changed after scheduling back.
+ * @sbq: Bitmap queue to wake up.
+ */
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
+
+/**
* sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
* seq_file.
* @sbq: Bitmap queue to show.
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index e6a9c06ec70c..c6ae4206bcb1 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -466,6 +466,12 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
}
}
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
+{
+ sbq_wake_up(sbq);
+}
+EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
+
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
--
2.9.5
Hi Greg,
9 more patches against the 2018/05/23 linux-4.9.y stable branch.
This brings the Spectre defense in 4.9 up to date with the
current upstream tree. The upstream patches to remove the indirect
branches from the BPF JIT are included (these do not have a
CC:stable tag).
Martin Schwidefsky (9):
s390: add assembler macros for CPU alternatives
s390: move expoline assembler macros to a header
s390/crc32-vx: use expoline for indirect branches
s390/lib: use expoline for indirect branches
s390/ftrace: use expoline for indirect branches
s390/kernel: use expoline for indirect branches
s390: move spectre sysfs attribute code
s390: extend expoline to BC instructions
s390: use expoline thunks in the BPF JIT
arch/s390/crypto/crc32be-vx.S | 5 +-
arch/s390/crypto/crc32le-vx.S | 4 +-
arch/s390/include/asm/alternative-asm.h | 108 ++++++++++++++++++
arch/s390/include/asm/nospec-insn.h | 195 ++++++++++++++++++++++++++++++++
arch/s390/kernel/Makefile | 1 +
arch/s390/kernel/asm-offsets.c | 1 +
arch/s390/kernel/base.S | 24 ++--
arch/s390/kernel/entry.S | 105 ++++-------------
arch/s390/kernel/mcount.S | 14 ++-
arch/s390/kernel/nospec-branch.c | 43 ++++---
arch/s390/kernel/nospec-sysfs.c | 21 ++++
arch/s390/kernel/reipl.S | 7 +-
arch/s390/kernel/swsusp.S | 9 +-
arch/s390/lib/mem.S | 9 +-
arch/s390/net/bpf_jit.S | 16 ++-
arch/s390/net/bpf_jit_comp.c | 63 ++++++++++-
16 files changed, 488 insertions(+), 137 deletions(-)
create mode 100644 arch/s390/include/asm/alternative-asm.h
create mode 100644 arch/s390/include/asm/nospec-insn.h
create mode 100644 arch/s390/kernel/nospec-sysfs.c
--
2.16.3
Hi Greg,
9 more patches against the 2018/05/23 linux-4.14.y stable branch.
This brings the Spectre defense in 4.14 up to date with the
current upstream tree. The upstream patches to remove the indirect
branches from the BPF JIT are included (these do not have a
CC:stable tag).
Martin Schwidefsky (9):
s390: add assembler macros for CPU alternatives
s390: move expoline assembler macros to a header
s390/crc32-vx: use expoline for indirect branches
s390/lib: use expoline for indirect branches
s390/ftrace: use expoline for indirect branches
s390/kernel: use expoline for indirect branches
s390: move spectre sysfs attribute code
s390: extend expoline to BC instructions
s390: use expoline thunks in the BPF JIT
arch/s390/crypto/crc32be-vx.S | 5 +-
arch/s390/crypto/crc32le-vx.S | 4 +-
arch/s390/include/asm/alternative-asm.h | 108 ++++++++++++++++++
arch/s390/include/asm/nospec-insn.h | 195 ++++++++++++++++++++++++++++++++
arch/s390/kernel/Makefile | 1 +
arch/s390/kernel/asm-offsets.c | 1 +
arch/s390/kernel/base.S | 24 ++--
arch/s390/kernel/entry.S | 105 ++++-------------
arch/s390/kernel/mcount.S | 14 ++-
arch/s390/kernel/nospec-branch.c | 43 ++++---
arch/s390/kernel/nospec-sysfs.c | 21 ++++
arch/s390/kernel/reipl.S | 7 +-
arch/s390/kernel/swsusp.S | 10 +-
arch/s390/lib/mem.S | 13 ++-
arch/s390/net/bpf_jit.S | 16 ++-
arch/s390/net/bpf_jit_comp.c | 63 ++++++++++-
16 files changed, 490 insertions(+), 140 deletions(-)
create mode 100644 arch/s390/include/asm/alternative-asm.h
create mode 100644 arch/s390/include/asm/nospec-insn.h
create mode 100644 arch/s390/kernel/nospec-sysfs.c
--
2.16.3
Hi Greg,
Please queue up this series of patches for 4.16 if you have no objections.
These are mostly clean backports, but one or two required some fixing up, hence
the backport.
cheers
Mauricio Faria de Oliveira (2):
powerpc/pseries: Fix clearing of security feature flags
powerpc: Move default security feature flags
Michael Ellerman (11):
powerpc/rfi-flush: Always enable fallback flush on pseries
powerpc: Add security feature flags for Spectre/Meltdown
powerpc/pseries: Add new H_GET_CPU_CHARACTERISTICS flags
powerpc/pseries: Set or clear security feature flags
powerpc/powernv: Set or clear security feature flags
powerpc/64s: Move cpu_show_meltdown()
powerpc/64s: Enhance the information in cpu_show_meltdown()
powerpc/powernv: Use the security flags in pnv_setup_rfi_flush()
powerpc/pseries: Use the security flags in pseries_setup_rfi_flush()
powerpc/64s: Wire up cpu_show_spectre_v1()
powerpc/64s: Wire up cpu_show_spectre_v2()
Nicholas Piggin (1):
powerpc/64s: Add support for a store forwarding barrier at kernel
entry/exit
arch/powerpc/include/asm/exception-64s.h | 29 ++++
arch/powerpc/include/asm/feature-fixups.h | 19 +++
arch/powerpc/include/asm/hvcall.h | 3 +
arch/powerpc/include/asm/security_features.h | 85 ++++++++++
arch/powerpc/kernel/Makefile | 2 +-
arch/powerpc/kernel/exceptions-64s.S | 19 ++-
arch/powerpc/kernel/security.c | 237 +++++++++++++++++++++++++++
arch/powerpc/kernel/setup_64.c | 8 -
arch/powerpc/kernel/vmlinux.lds.S | 14 ++
arch/powerpc/lib/feature-fixups.c | 115 +++++++++++++
arch/powerpc/platforms/powernv/setup.c | 96 +++++++----
arch/powerpc/platforms/pseries/setup.c | 71 +++++---
12 files changed, 638 insertions(+), 60 deletions(-)
create mode 100644 arch/powerpc/include/asm/security_features.h
create mode 100644 arch/powerpc/kernel/security.c
--
2.14.1
Changes since v1: [1]
* Kill support for mapping System RAM as a nop. No one uses this
functionality and it is broken relative to percpu_ref management.
* Fix percpu_ref teardown. Given that devm_memremap_pages() has strict
assumptions about when the percpu_ref is killed, give it
responsibility to make the live-dead transition explicitly. (Logan)
* Split the patch that adds HMM support to devm_memremap_pages() from
the patch that converts HMM to use devm_memremap_pages(). This caught
an incomplete conversion in v1. (Logan)
* Collect Christoph's reviewed-by.
[1]: https://lkml.org/lkml/2018/5/21/1109
---
Hi Andrew, here's v2 to replace the 5 currently in mm. The first and
last patch did not change.
For maintainability, as ZONE_DEVICE continues to attract new users,
it is useful to keep all users consolidated on devm_memremap_pages() as
the interface for creating "device pages".
The devm_memremap_pages() implementation was recently reworked to make
it more generic for arbitrary users, like the proposed peer-to-peer
PCI-E enabling. HMM pre-dated this rework and opted to duplicate
devm_memremap_pages() as hmm_devmem_pages_create().
Rework HMM to be a consumer of devm_memremap_pages() directly and fix up
the licensing on the exports given the deep dependencies on the mm.
Patches based on v4.17-rc6 where there are no upstream consumers of the
HMM functionality.
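As a rough sketch of the consolidated interface (the exact fields have
shifted across versions; pgmap->kill in particular is what the "Fix
shutdown handling" patch introduces, so every name below is an
assumption for illustration, not the final API):

/* Hypothetical driver fragment: the driver owns the live-dead
 * transition of its percpu_ref and hands devm_memremap_pages() an
 * explicit kill callback instead of relying on implicit teardown. */
static void my_pgmap_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static int my_probe(struct device *dev, struct my_dev *mydev)
{
	void *addr;

	mydev->pgmap.ref = &mydev->ref;
	mydev->pgmap.kill = my_pgmap_kill;

	addr = devm_memremap_pages(dev, &mydev->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	mydev->virt = addr;
	return 0;
}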
---
Dan Williams (7):
mm, devm_memremap_pages: Mark devm_memremap_pages() EXPORT_SYMBOL_GPL
mm, devm_memremap_pages: Kill mapping "System RAM" support
mm, devm_memremap_pages: Fix shutdown handling
mm, devm_memremap_pages: Add MEMORY_DEVICE_PRIVATE support
mm, hmm: Use devm semantics for hmm_devmem_{add,remove}
mm, hmm: Replace hmm_devmem_pages_create() with devm_memremap_pages()
mm, hmm: Mark hmm_devmem_{add,add_resource} EXPORT_SYMBOL_GPL
Documentation/vm/hmm.txt | 1
drivers/dax/pmem.c | 10 -
drivers/nvdimm/pmem.c | 18 +-
include/linux/hmm.h | 4
include/linux/memremap.h | 7 +
kernel/memremap.c | 85 +++++++---
mm/hmm.c | 307 +++++--------------------------------
tools/testing/nvdimm/test/iomap.c | 21 ++-
8 files changed, 130 insertions(+), 323 deletions(-)
Depending on whether the kernel is compiled with frame-pointer or not,
the temporary memory location used for the bp parameter in these macros
is referenced relative to the stack pointer or the frame pointer.
Hence we can never reference that parameter when we've modified either
the stack pointer or the frame pointer, because then the compiler would
generate an incorrect stack reference.
Fix this by pushing the temporary memory parameter on a known location on
the stack before modifying the stack- and frame pointers.
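A minimal standalone illustration of the fixed pattern (i386 only,
compile with -m32; hb_out_demo and bp_val are made-up names, not the
driver's macros):

/* Push the "m" operand while %esp/%ebp still match what the compiler
 * assumed, save %ebp, then reload the value from the known stack slot. */
static inline unsigned int hb_out_demo(unsigned int bp_val)
{
	unsigned int eax_out;

	asm volatile("push %1;"			/* read before clobbering */
		     "push %%ebp;"
		     "mov  0x04(%%esp), %%ebp;"	/* the value pushed first */
		     "mov  %%ebp, %%eax;"	/* stand-in for "rep outsb" */
		     "pop  %%ebp;"
		     "add  $0x04, %%esp;"
		     : "=a"(eax_out)
		     : "m"(bp_val)
		     : "memory", "cc");
	return eax_out;
}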
Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
---
drivers/gpu/drm/vmwgfx/vmwgfx_msg.h | 25 +++++++++++++++++--------
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
index 557a033fb610..8545488aa0cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
@@ -135,17 +135,24 @@
#else
-/* In the 32-bit version of this macro, we use "m" because there is no
- * more register left for bp
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've run out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
*/
#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
port_num, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%ebp;" \
- "mov %12, %%ebp;" \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
"rep outsb;" \
- "pop %%ebp;" : \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
@@ -167,10 +174,12 @@
port_num, magic, bp, \
eax, ebx, ecx, edx, si, di) \
({ \
- asm volatile ("push %%ebp;" \
- "mov %12, %%ebp;" \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
"rep insb;" \
- "pop %%ebp" : \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
"=a"(eax), \
"=b"(ebx), \
"=c"(ecx), \
--
2.17.0
The __clear_user function is defined to return the number of bytes that
could not be cleared. From the underlying memset / bzero implementation
this means setting register a2 to that number on return. Currently, if a
page fault is triggered within the MIPSr6 version of the initial
unaligned bytes block, the value loaded into a2 on return is meaningless.
During the MIPSr6 version of the initial unaligned bytes block, register
a2 contains the number of bytes to be set beyond the initial unaligned
bytes. The t0 register is initially set to the number of unaligned bytes
- STORSIZE, effectively a negative version of the number of unaligned
bytes. This is then incremented before each byte is saved.
The label .Lbyte_fixup\@ is jumped to on page fault. Currently the value
in a2 is incorrectly replaced by 0 - t0 + 1, effectively the number of
unaligned bytes remaining. This leads to the failures being reported by
the following test code:
static int __init test_clear_user(void)
{
int j, k;
pr_info("\n\n\nTesting clear_user\n");
for (j = 0; j < 512; j++) {
if ((k = clear_user(NULL+3, j)) != j) {
pr_err("clear_user (NULL %d) returned %d\n", j, k);
}
}
return 0;
}
late_initcall(test_clear_user);
Which reports:
[ 3.965439] Testing clear_user
[ 3.973169] clear_user (NULL 8) returned 6
[ 3.976782] clear_user (NULL 9) returned 6
[ 3.980390] clear_user (NULL 10) returned 6
[ 3.984052] clear_user (NULL 11) returned 6
[ 3.987524] clear_user (NULL 12) returned 6
Fix this by subtracting t0 from a2 (rather than $0), effectively giving:
unset_bytes = a2 - t0 + 1
            = (#bytes - #unaligned bytes) - (-#unaligned bytes remaining + 1) + 1
This fixes the value returned from __clear_user when the number of bytes
to set is > LONGSIZE and the address is invalid and unaligned.
Unfortunately, this breaks the fixup handling for unaligned bytes after
the final long, where register a2 still contains the number of bytes
remaining to be set and the t0 register is set to 0 - the number of
unaligned bytes remaining.
Because t0 is now subtracted from a2 rather than from 0, the number of
bytes unset is reported incorrectly:
static int __init test_clear_user(void)
{
char *test;
int j, k;
pr_info("\n\n\nTesting clear_user\n");
test = vmalloc(PAGE_SIZE);
for (j = 256; j < 512; j++) {
if ((k = clear_user(test + PAGE_SIZE - 254, j)) != j - 254) {
pr_err("clear_user (%px %d) returned %d\n",
test + PAGE_SIZE - 254, j, k);
}
}
return 0;
}
late_initcall(test_clear_user);
[ 3.976775] clear_user (c00000000000df02 256) returned 4
[ 3.981957] clear_user (c00000000000df02 257) returned 6
[ 3.986425] clear_user (c00000000000df02 258) returned 8
[ 3.990850] clear_user (c00000000000df02 259) returned 10
[ 3.995332] clear_user (c00000000000df02 260) returned 12
[ 3.999815] clear_user (c00000000000df02 261) returned 14
Fix this by ensuring that a2 is set to 0 during the set of final
unaligned bytes.
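A userspace model of the fixed fixup arithmetic (names are illustrative;
STORSIZE is 4 or 8 depending on the ABI):

/* .Lbyte_fixup\@ after this patch: PTR_SUBU a2, t0; PTR_ADDIU a2, 1.
 * t0 counts up from minus the number of bytes the byte loop must set;
 * a2 holds the bytes still owed once the loop completes. */
static long fixup_unset_bytes(long a2, long t0)
{
	return a2 - t0 + 1;
}

/* Initial unaligned bytes: a2 = bytes beyond the prefix and t0 is the
 * negative count of prefix bytes remaining, so the total unset bytes
 * are returned. Final unaligned bytes: a2 is zeroed by this patch
 * ("No remaining longs"), so only the unaligned remainder is reported. */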
Fixes: 8c56208aff77 ("MIPS: lib: memset: Add MIPS R6 support")
Cc: stable@vger.kernel.org
Signed-off-by: Matt Redfearn <matt.redfearn@mips.com>
---
Changes in v3:
New patch to fix fault handling during MIPSr6 version of setting
unaligned bytes.
Changes in v2: None
arch/mips/lib/memset.S | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 1cc306520a5..fac26ce64b2 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -195,6 +195,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
+ move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
STORE_BYTE(1)
@@ -231,7 +232,7 @@
#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
- PTR_SUBU a2, $0, t0
+ PTR_SUBU a2, t0
jr ra
PTR_ADDIU a2, 1
#endif /* CONFIG_CPU_MIPSR6 */
--
2.7.4