Add test cases to test the correctness of PFN ZERO flag of pagemap_scan ioctl. Test with normal pages backed memory and huge pages backed memory.
Cc: David Hildenbrand david@redhat.com Signed-off-by: Muhammad Usama Anjum usama.anjum@collabora.com --- The bug has been fixed [1].
[1] https://lore.kernel.org/all/20250617143532.2375383-1-david@redhat.com --- tools/testing/selftests/mm/pagemap_ioctl.c | 57 +++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index 57b4bba2b45f3..6138de0087edf 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 + #define _GNU_SOURCE #include <stdio.h> #include <fcntl.h> @@ -1480,6 +1481,57 @@ static void transact_test(int page_size) extra_thread_faults); }
+void zeropfn_tests(void) +{ + unsigned long long mem_size; + struct page_region vec; + int i, ret; + char *mem; + + /* Test with page backed memory */ + mem_size = 10 * page_size; + mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n"); + + /* Touch each page to ensure it's mapped */ + for (i = 0; i < mem_size; i += page_size) + (void)((volatile char *)mem)[i]; + + ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0, + (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size), + "%s all pages must have PFNZERO set\n", __func__); + + munmap(mem, mem_size); + + /* Test with huge page */ + mem_size = 10 * hpage_size; + mem = memalign(hpage_size, mem_size); + if (!mem) + ksft_exit_fail_msg("error nomem\n"); + + ret = madvise(mem, mem_size, MADV_HUGEPAGE); + if (ret) + ksft_exit_fail_msg("madvise failed %d %s\n", errno, strerror(errno)); + + for (i = 0; i < mem_size; i += hpage_size) + (void)((volatile char *)mem)[i]; + + ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0, + (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + + ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size), + "%s all huge pages must have PFNZERO set\n", __func__); + + free(mem); +} + int main(int __attribute__((unused)) argc, char *argv[]) { int shmid, buf_size, fd, i, ret; @@ -1494,7 +1546,7 @@ int main(int __attribute__((unused)) argc, char *argv[]) if (init_uffd()) ksft_exit_pass();
- ksft_set_plan(115); + ksft_set_plan(117);
page_size = getpagesize(); hpage_size = read_pmd_pagesize(); @@ -1669,6 +1721,9 @@ int main(int __attribute__((unused)) argc, char *argv[]) /* 16. Userfaultfd tests */ userfaultfd_tests();
+ /* 17. ZEROPFN tests */ + zeropfn_tests(); + close(pagemap_fd); ksft_exit_pass(); }
On 30.06.25 12:24, Muhammad Usama Anjum wrote:
Add test cases to test the correctness of PFN ZERO flag of pagemap_scan ioctl. Test with normal pages backed memory and huge pages backed memory.
Just to verify: would this trigger on kernels before my fix?
Cc: David Hildenbrand david@redhat.com Signed-off-by: Muhammad Usama Anjum usama.anjum@collabora.com
The bug has been fixed [1].
[1] https://lore.kernel.org/all/20250617143532.2375383-1-david@redhat.com
tools/testing/selftests/mm/pagemap_ioctl.c | 57 +++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index 57b4bba2b45f3..6138de0087edf 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0
- #define _GNU_SOURCE #include <stdio.h> #include <fcntl.h>
@@ -1480,6 +1481,57 @@ static void transact_test(int page_size) extra_thread_faults); } +void zeropfn_tests(void) +{
- unsigned long long mem_size;
- struct page_region vec;
- int i, ret;
- char *mem;
- /* Test with page backed memory */
What is "page backed memory" ? :)
- mem_size = 10 * page_size;
- mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (mem == MAP_FAILED)
ksft_exit_fail_msg("error nomem\n");
- /* Touch each page to ensure it's mapped */
- for (i = 0; i < mem_size; i += page_size)
(void)((volatile char *)mem)[i];
- ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
(mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
- if (ret < 0)
ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
- ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
"%s all pages must have PFNZERO set\n", __func__);
- munmap(mem, mem_size);
- /* Test with huge page */
- mem_size = 10 * hpage_size;
- mem = memalign(hpage_size, mem_size);
- if (!mem)
ksft_exit_fail_msg("error nomem\n");
- ret = madvise(mem, mem_size, MADV_HUGEPAGE);
- if (ret)
ksft_exit_fail_msg("madvise failed %d %s\n", errno, strerror(errno));
Might fail on older kernels, so we usually treat this as a skip.
- for (i = 0; i < mem_size; i += hpage_size)
(void)((volatile char *)mem)[i];
- ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0,
(mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO);
- if (ret < 0)
ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
- ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size),
"%s all huge pages must have PFNZERO set\n", __func__);
Wouldn't this be able to fail if /sys/kernel/mm/transparent_hugepage/use_zero_page is set to false, or if mmap() gave us a suboptimally-aligned range?
You'd have to read each and every page to get the ordinary shared zeropage in these configs instead without making the test too complicated.
- free(mem);
Shouldn't this be an munmap() ?
+}
- int main(int __attribute__((unused)) argc, char *argv[]) { int shmid, buf_size, fd, i, ret;
@@ -1494,7 +1546,7 @@ int main(int __attribute__((unused)) argc, char *argv[]) if (init_uffd()) ksft_exit_pass();
- ksft_set_plan(115);
- ksft_set_plan(117);
We should probably look into converting this test to kselftest_harness.
On 7/1/25 7:51 PM, David Hildenbrand wrote:
On 30.06.25 12:24, Muhammad Usama Anjum wrote:
Add test cases to test the correctness of PFN ZERO flag of pagemap_scan ioctl. Test with normal pages backed memory and huge pages backed memory.
Just to verify: would this trigger on kernels before my fix?
Yes, it does trigger the bug without the fix.
Cc: David Hildenbrand david@redhat.com Signed-off-by: Muhammad Usama Anjum usama.anjum@collabora.com
The bug has been fixed [1].
[1] https://lore.kernel.org/all/20250617143532.2375383-1-david@redhat.com
tools/testing/selftests/mm/pagemap_ioctl.c | 57 +++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index 57b4bba2b45f3..6138de0087edf 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE #include <stdio.h> #include <fcntl.h> @@ -1480,6 +1481,57 @@ static void transact_test(int page_size) extra_thread_faults); } +void zeropfn_tests(void) +{ + unsigned long long mem_size; + struct page_region vec; + int i, ret; + char *mem;
+ /* Test with page backed memory */
What is "page backed memory" ? :)
I mean, normal memory which isn't huge page backed. I've renamed it to "Test with normal memory".
+ mem_size = 10 * page_size; + mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n");
+ /* Touch each page to ensure it's mapped */ + for (i = 0; i < mem_size; i += page_size) + (void)((volatile char *)mem)[i];
+ ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0, + (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+ ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size), + "%s all pages must have PFNZERO set\n", __func__);
+ munmap(mem, mem_size);
+ /* Test with huge page */ + mem_size = 10 * hpage_size; + mem = memalign(hpage_size, mem_size); + if (!mem) + ksft_exit_fail_msg("error nomem\n");
+ ret = madvise(mem, mem_size, MADV_HUGEPAGE); + if (ret) + ksft_exit_fail_msg("madvise failed %d %s\n", errno, strerror(errno));
Might fail on older kernels, so we usually treat this as a skip.
I'll skip it in next version.
+ for (i = 0; i < mem_size; i += hpage_size) + (void)((volatile char *)mem)[i];
+ ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0, + (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+ ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size), + "%s all huge pages must have PFNZERO set\n", __func__);
Wouldn't this be able to fail if /sys/kernel/mm/transparent_hugepage/use_zero_page is set to false,
I wasn't aware of it. I'll check use_zero_page first as an initial condition.
or if mmap() gave us a suboptimally-aligned range?
mem = memalign(hpage_size, mem_size) is being used to allocate this. So alignment should be correct.
You'd have to read each and every page to get the ordinary shared zeropage in these configs instead without making the test too complicated.
In the above for loop, we are reading each new page already. Let's resolve this and then I'll post the v2 which is ready.
+ free(mem);
Shouldn't this be an munmap() ?
free() is being used to free memory allocated by memalign().
+}
int main(int __attribute__((unused)) argc, char *argv[]) { int shmid, buf_size, fd, i, ret; @@ -1494,7 +1546,7 @@ int main(int __attribute__((unused)) argc, char *argv[]) if (init_uffd()) ksft_exit_pass(); - ksft_set_plan(115); + ksft_set_plan(117);
We should probably look into converting this test to kselftest_harness.
On 02.07.25 09:39, Muhammad Usama Anjum wrote:
On 7/1/25 7:51 PM, David Hildenbrand wrote:
On 30.06.25 12:24, Muhammad Usama Anjum wrote:
Add test cases to test the correctness of PFN ZERO flag of pagemap_scan ioctl. Test with normal pages backed memory and huge pages backed memory.
Just to verify: would this trigger on kernels before my fix?
Yes, it does trigger the bug without the fix.
Cc: David Hildenbrand david@redhat.com Signed-off-by: Muhammad Usama Anjum usama.anjum@collabora.com
The bug has been fixed [1].
[1] https://lore.kernel.org/all/20250617143532.2375383-1-david@redhat.com
tools/testing/selftests/mm/pagemap_ioctl.c | 57 +++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index 57b4bba2b45f3..6138de0087edf 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE #include <stdio.h> #include <fcntl.h> @@ -1480,6 +1481,57 @@ static void transact_test(int page_size) extra_thread_faults); } +void zeropfn_tests(void) +{ + unsigned long long mem_size; + struct page_region vec; + int i, ret; + char *mem;
+ /* Test with page backed memory */
What is "page backed memory" ? :)
I mean, normal memory which isn't huge page backed. I've renamed it to "Test with normal memory".
+ mem_size = 10 * page_size; + mem = mmap(NULL, mem_size, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0); + if (mem == MAP_FAILED) + ksft_exit_fail_msg("error nomem\n");
+ /* Touch each page to ensure it's mapped */ + for (i = 0; i < mem_size; i += page_size) + (void)((volatile char *)mem)[i];
+ ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0, + (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+ ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size), + "%s all pages must have PFNZERO set\n", __func__);
+ munmap(mem, mem_size);
+ /* Test with huge page */ + mem_size = 10 * hpage_size; + mem = memalign(hpage_size, mem_size); + if (!mem) + ksft_exit_fail_msg("error nomem\n");
+ ret = madvise(mem, mem_size, MADV_HUGEPAGE); + if (ret) + ksft_exit_fail_msg("madvise failed %d %s\n", errno, strerror(errno));
Might fail on older kernels, so we usually treat this as a skip.
I'll skip it in next version.
+ for (i = 0; i < mem_size; i += hpage_size) + (void)((volatile char *)mem)[i];
+ ret = pagemap_ioctl(mem, mem_size, &vec, 1, 0, + (mem_size / page_size), PAGE_IS_PFNZERO, 0, 0, PAGE_IS_PFNZERO); + if (ret < 0) + ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno));
+ ksft_test_result(ret == 1 && LEN(vec) == (mem_size / page_size), + "%s all huge pages must have PFNZERO set\n", __func__);
Wouldn't this be able to fail if /sys/kernel/mm/transparent_hugepage/use_zero_page is set to false,
I wasn't aware of it. I'll check use_zero_page first as an initial condition.
or if mmap() gave us a suboptimally-aligned range?
mem = memalign(hpage_size, mem_size) is being used to allocate this. So alignment should be correct.
You'd have to read each and every page to get the ordinary shared zeropage in these configs instead without making the test too complicated.
In the above for loop, we are reading each new page already. Let's resolve this and then I'll post the v2 which is ready.
+ free(mem);
Shouldn't this be an munmap() ?
free() is being used to free memory allocated by memalign().
Oh, I missed that detail.
I'm afraid memalign+free that might not be what you want: there is no guarantee that what you are getting hasn't been used before and is not already filled with other pages?
...
+ free(mem);
Shouldn't this be an munmap() ?
free() is being used to free memory allocated by memalign().
Oh, I missed that detail.
I'm afraid memalign+free that might not be what you want: there is no guarantee that what you are getting hasn't been used before and is not already filled with other pages?
I'm not sure about that. I've been using memalign() to allocate aligned memory and then using madvise().
I'll switch up to mmap() for this test. I'll send v2.
linux-kselftest-mirror@lists.linaro.org