The new tag_offset and tag_len parameters can be used to place an MTE tag at an address that is not on a page-size boundary.
Prior to commit 295cf156231c ("arm64: Avoid premature usercopy failure"), the kernel would loop forever if an MTE tag was placed anywhere other than at a PAGE_SIZE boundary. This is because the kernel checked that the pages were readable by touching the first byte of each page, but the usercopy would then fault in the middle of the page due to the MTE tag mismatch, report no progress, and be retried indefinitely.
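The failure mode can be reproduced outside the selftest harness. Below is a minimal standalone sketch, not part of this series: it assumes an MTE-capable arm64 CPU and kernel, replaces the selftests' mte_insert_new_tag()/mte_set_tag_address_range() helpers with raw IRG/STG inline assembly, and must be built with -march=armv8.5-a+memtag. On a kernel without 295cf156231c the final read() livelocks; with the fix it returns a short count.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE		0x20
#endif
#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC		(1UL << 1)
#define PR_MTE_TAG_SHIFT	3
#endif
#define MT_GRANULE_SIZE		16	/* MTE tag granule, fixed by the architecture */

int main(void)
{
	size_t page_sz = getpagesize(), len = 2 * page_sz;
	char tmp[] = "/tmp/mte-XXXXXX";
	int fd = mkstemp(tmp);
	char *src, *buf, *g;
	ssize_t n;

	unlink(tmp);

	/* Enable tagged addressing, synchronous tag faults, nonzero IRG tags. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL,
		  PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
		  (0xfffeUL << PR_MTE_TAG_SHIFT), 0, 0, 0))
		return 1;

	/* Fill the file from an untagged buffer so we can read it back. */
	src = malloc(len);
	memset(src, 'A', len);
	if (write(fd, src, len) != (ssize_t)len)
		return 1;
	lseek(fd, 0, SEEK_SET);

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	/* Re-tag one granule in the middle of the second page, so the tag
	 * mismatch sits at a non-page-aligned address. */
	g = buf + page_sz + MT_GRANULE_SIZE;
	asm volatile("irg %0, %0" : "+r"(g));			/* random nonzero tag */
	asm volatile("stg %0, [%0]" : : "r"(g) : "memory");	/* set allocation tag */

	/* The kernel copies the first page fine, then hits the tag fault
	 * mid-page: pre-fix kernels spin, fixed kernels return short. */
	n = read(fd, buf, len);
	printf("read copied %zd of %zu bytes\n", n, len);
	return 0;
}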
Signed-off-by: Joey Gouly <joey.gouly@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Shuah Khan <shuah@kernel.org>
Reviewed-by: Mark Brown <broonie@kernel.org>
Tested-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Shuah Khan <skhan@linuxfoundation.org>
---
 .../selftests/arm64/mte/check_user_mem.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/tools/testing/selftests/arm64/mte/check_user_mem.c b/tools/testing/selftests/arm64/mte/check_user_mem.c
index 1de7a0abd0ae..5a5a7e1f5789 100644
--- a/tools/testing/selftests/arm64/mte/check_user_mem.c
+++ b/tools/testing/selftests/arm64/mte/check_user_mem.c
@@ -19,7 +19,8 @@
 
 static size_t page_sz;
 
-static int check_usermem_access_fault(int mem_type, int mode, int mapping)
+static int check_usermem_access_fault(int mem_type, int mode, int mapping,
+				      int tag_offset, int tag_len)
 {
 	int fd, i, err;
 	char val = 'A';
@@ -54,10 +55,12 @@ static int check_usermem_access_fault(int mem_type, int mode, int mapping)
 	if (i < len)
 		goto usermem_acc_err;
 
-	/* Tag the next half of memory with different value */
-	ptr_next = (void *)((unsigned long)ptr + page_sz);
+	if (!tag_len)
+		tag_len = len - tag_offset;
+	/* Tag a part of memory with different value */
+	ptr_next = (void *)((unsigned long)ptr + tag_offset);
 	ptr_next = mte_insert_new_tag(ptr_next);
-	mte_set_tag_address_range(ptr_next, page_sz);
+	mte_set_tag_address_range(ptr_next, tag_len);
 
 	lseek(fd, 0, 0);
 	/* Copy from file into buffer with invalid tag */
@@ -100,14 +103,14 @@ int main(int argc, char *argv[])
 	/* Set test plan */
 	ksft_set_plan(4);
 
-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE, page_sz, 0),
 		      "Check memory access from kernel in sync mode, private mapping and mmap memory\n");
-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_SHARED, page_sz, 0),
 		      "Check memory access from kernel in sync mode, shared mapping and mmap memory\n");
 
-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_PRIVATE, page_sz, 0),
 		      "Check memory access from kernel in async mode, private mapping and mmap memory\n");
-	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED),
+	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_ASYNC_ERR, MAP_SHARED, page_sz, 0),
 		      "Check memory access from kernel in async mode, shared mapping and mmap memory\n");
 
 	mte_restore_setup();
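With the two new parameters in place, a follow-up test can target the mid-page case directly. A hypothetical invocation (not part of this diff; MT_GRANULE_SIZE comes from the selftests' mte_def.h, and ksft_set_plan() would need to grow accordingly) might look like:

	/* Hypothetical follow-up case: place the mismatching tag one
	 * granule into the second page, so the usercopy faults mid-page. */
	evaluate_test(check_usermem_access_fault(USE_MMAP, MTE_SYNC_ERR, MAP_PRIVATE,
						 page_sz + MT_GRANULE_SIZE, MT_GRANULE_SIZE),
		      "Check memory access from kernel, tag placed mid-page\n");

Passing tag_len == 0 keeps the old behaviour of tagging everything from tag_offset to the end of the buffer, which is why the four existing callers pass (page_sz, 0).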