From: Vincent Donnefort <vdonnefort@google.com>
Improve the ring-buffer meta-page test coverage by checking that the entire padding region is zeroed, instead of only inspecting its first 4 bytes.
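For illustration only (not part of this patch), below is a minimal standalone sketch of the same idea: walk the padding in int-sized steps from the end of the meta structure to the end of the meta page, so a stray non-zero word anywhere in the padding is caught rather than only one at the first word. The helper name, buffer, and sizes (padding_is_zeroed, struct_len, page_size) are invented for this sketch.

/*
 * Standalone illustration only -- not kernel code. The names used here
 * are made up for the example.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Return 1 if every int-sized word in [struct_len, page_size) is zero. */
static int padding_is_zeroed(const void *page, size_t struct_len, size_t page_size)
{
	for (size_t i = struct_len; i < page_size; i += sizeof(int))
		if (*(const int *)((const char *)page + i) != 0)
			return 0;
	return 1;
}

int main(void)
{
	const size_t page_size = 4096, struct_len = 128;
	char *page = calloc(1, page_size);

	assert(page);
	/* Fully zeroed padding passes. */
	assert(padding_is_zeroed(page, struct_len, page_size));

	/* A non-zero word deep in the padding is detected, which a check
	 * of only the first 4 bytes past struct_len would miss. */
	page[page_size - sizeof(int)] = 1;
	assert(!padding_is_zeroed(page, struct_len, page_size));

	printf("padding check OK\n");
	free(page);
	return 0;
}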
Cc: linux-kselftest@vger.kernel.org
Link: https://lore.kernel.org/20240910162335.2993310-2-vdonnefort@google.com
Acked-by: Shuah Khan <skhan@linuxfoundation.org>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 tools/testing/selftests/ring-buffer/map_test.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/testing/selftests/ring-buffer/map_test.c b/tools/testing/selftests/ring-buffer/map_test.c
index 4bb0192e43f3..ba12fd31de87 100644
--- a/tools/testing/selftests/ring-buffer/map_test.c
+++ b/tools/testing/selftests/ring-buffer/map_test.c
@@ -231,15 +231,15 @@ TEST_F(map, data_mmap)
 
 	/* Verify meta-page padding */
 	if (desc->meta->meta_page_size > getpagesize()) {
-		void *addr;
-
 		data_len = desc->meta->meta_page_size;
 		data = mmap(NULL, data_len,
 			    PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
 		ASSERT_NE(data, MAP_FAILED);
 
-		addr = (void *)((unsigned long)data + getpagesize());
-		ASSERT_EQ(*((int *)addr), 0);
+		for (int i = desc->meta->meta_struct_len;
+		     i < desc->meta->meta_page_size; i += sizeof(int))
+			ASSERT_EQ(*(int *)(data + i), 0);
+
 		munmap(data, data_len);
 	}
 }