The kexec segment index will be required to extract the corresponding information for that segment in kimage_map_segment(). Additionally, kexec_segment already holds the kexec relocation destination address and size. Therefore, the prototype of kimage_map_segment() can be changed.
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation") Signed-off-by: Pingfan Liu piliu@redhat.com Cc: Andrew Morton akpm@linux-foundation.org Cc: Baoquan He bhe@redhat.com Cc: Mimi Zohar zohar@linux.ibm.com Cc: Roberto Sassu roberto.sassu@huawei.com Cc: Alexander Graf graf@amazon.com Cc: Steven Chen chenste@linux.microsoft.com Cc: stable@vger.kernel.org To: kexec@lists.infradead.org To: linux-integrity@vger.kernel.org --- include/linux/kexec.h | 4 ++-- kernel/kexec_core.c | 9 ++++++--- security/integrity/ima/ima_kexec.c | 4 +--- 3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ff7e231b0485..8a22bc9b8c6c 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -530,7 +530,7 @@ extern bool kexec_file_dbg_print; #define kexec_dprintk(fmt, arg...) \ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
-extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size); +extern void *kimage_map_segment(struct kimage *image, int idx); extern void kimage_unmap_segment(void *buffer); #else /* !CONFIG_KEXEC_CORE */ struct pt_regs; @@ -540,7 +540,7 @@ static inline void __crash_kexec(struct pt_regs *regs) { } static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } static inline int kexec_crash_loaded(void) { return 0; } -static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size) +static inline void *kimage_map_segment(struct kimage *image, int idx) { return NULL; } static inline void kimage_unmap_segment(void *buffer) { } #define kexec_in_progress false diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index fa00b239c5d9..9a1966207041 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -960,17 +960,20 @@ int kimage_load_segment(struct kimage *image, int idx) return result; }
-void *kimage_map_segment(struct kimage *image, - unsigned long addr, unsigned long size) +void *kimage_map_segment(struct kimage *image, int idx) { + unsigned long addr, size, eaddr; unsigned long src_page_addr, dest_page_addr = 0; - unsigned long eaddr = addr + size; kimage_entry_t *ptr, entry; struct page **src_pages; unsigned int npages; void *vaddr = NULL; int i;
+ addr = image->segment[idx].mem; + size = image->segment[idx].memsz; + eaddr = addr + size; + /* * Collect the source pages and map them in a contiguous VA range. */ diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c index 7362f68f2d8b..5beb69edd12f 100644 --- a/security/integrity/ima/ima_kexec.c +++ b/security/integrity/ima/ima_kexec.c @@ -250,9 +250,7 @@ void ima_kexec_post_load(struct kimage *image) if (!image->ima_buffer_addr) return;
- ima_kexec_buffer = kimage_map_segment(image, - image->ima_buffer_addr, - image->ima_buffer_size); + ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index); if (!ima_kexec_buffer) { pr_err("Could not map measurements buffer.\n"); return;
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation") Signed-off-by: Pingfan Liu piliu@redhat.com Cc: Andrew Morton akpm@linux-foundation.org Cc: Baoquan He bhe@redhat.com Cc: Alexander Graf graf@amazon.com Cc: Steven Chen chenste@linux.microsoft.com Cc: linux-integrity@vger.kernel.org Cc: stable@vger.kernel.org To: kexec@lists.infradead.org --- v1 -> v2: return page_address(page) instead of *page
kernel/kexec_core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 9a1966207041..332204204e53 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -967,6 +967,7 @@ void *kimage_map_segment(struct kimage *image, int idx) kimage_entry_t *ptr, entry; struct page **src_pages; unsigned int npages; + struct page *cma; void *vaddr = NULL; int i;
@@ -974,6 +975,9 @@ void *kimage_map_segment(struct kimage *image, int idx) size = image->segment[idx].memsz; eaddr = addr + size;
+ cma = image->segment_cma[idx]; + if (cma) + return page_address(cma); /* * Collect the source pages and map them in a contiguous VA range. */ @@ -1014,7 +1018,8 @@ void *kimage_map_segment(struct kimage *image, int idx)
void kimage_unmap_segment(void *segment_buffer) { - vunmap(segment_buffer); + if (is_vmalloc_addr(segment_buffer)) + vunmap(segment_buffer); }
struct kexec_load_limit {
On 11/06/25 at 02:59pm, Pingfan Liu wrote:
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Well, you didn't update the log accordingly.
Do you know why cma area can't be mapped into vmalloc?
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation") Signed-off-by: Pingfan Liu piliu@redhat.com Cc: Andrew Morton akpm@linux-foundation.org Cc: Baoquan He bhe@redhat.com Cc: Alexander Graf graf@amazon.com Cc: Steven Chen chenste@linux.microsoft.com Cc: linux-integrity@vger.kernel.org Cc: stable@vger.kernel.org To: kexec@lists.infradead.org
v1 -> v2: return page_address(page) instead of *page
kernel/kexec_core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 9a1966207041..332204204e53 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -967,6 +967,7 @@ void *kimage_map_segment(struct kimage *image, int idx) kimage_entry_t *ptr, entry; struct page **src_pages; unsigned int npages;
- struct page *cma; void *vaddr = NULL; int i;
@@ -974,6 +975,9 @@ void *kimage_map_segment(struct kimage *image, int idx) size = image->segment[idx].memsz; eaddr = addr + size;
- cma = image->segment_cma[idx];
- if (cma)
/*return page_address(cma);*/
- Collect the source pages and map them in a contiguous VA range.
@@ -1014,7 +1018,8 @@ void *kimage_map_segment(struct kimage *image, int idx) void kimage_unmap_segment(void *segment_buffer) {
- vunmap(segment_buffer);
- if (is_vmalloc_addr(segment_buffer))
vunmap(segment_buffer);
}

struct kexec_load_limit {
--
2.49.0
On Thu, Nov 6, 2025 at 4:01 PM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 02:59pm, Pingfan Liu wrote:
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Well, you didn't update the log accordingly.
I am not sure what you mean. Do you mean the earlier content in the thread, to which I replied?
Do you know why cma area can't be mapped into vmalloc?
Should not the kernel direct mapping be used?
Thanks,
Pingfan
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation") Signed-off-by: Pingfan Liu piliu@redhat.com Cc: Andrew Morton akpm@linux-foundation.org Cc: Baoquan He bhe@redhat.com Cc: Alexander Graf graf@amazon.com Cc: Steven Chen chenste@linux.microsoft.com Cc: linux-integrity@vger.kernel.org Cc: stable@vger.kernel.org To: kexec@lists.infradead.org
v1 -> v2: return page_address(page) instead of *page
kernel/kexec_core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 9a1966207041..332204204e53 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -967,6 +967,7 @@ void *kimage_map_segment(struct kimage *image, int idx) kimage_entry_t *ptr, entry; struct page **src_pages; unsigned int npages;
struct page *cma;
void *vaddr = NULL;
int i;

@@ -974,6 +975,9 @@ void *kimage_map_segment(struct kimage *image, int idx)
	size = image->segment[idx].memsz;
	eaddr = addr + size;
	cma = image->segment_cma[idx];
	if (cma)
		return page_address(cma);
	/*
	 * Collect the source pages and map them in a contiguous VA range.
	 */

@@ -1014,7 +1018,8 @@ void *kimage_map_segment(struct kimage *image, int idx)
void kimage_unmap_segment(void *segment_buffer) {
vunmap(segment_buffer);
	if (is_vmalloc_addr(segment_buffer))
		vunmap(segment_buffer);
}
struct kexec_load_limit {
2.49.0
On 11/06/25 at 06:01pm, Pingfan Liu wrote:
On Thu, Nov 6, 2025 at 4:01 PM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 02:59pm, Pingfan Liu wrote:
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Well, you didn't update the log accordingly.
I am not sure what you mean. Do you mean the earlier content in the thread, to which I replied?
No. In v1, you return cma directly. But in v2, you return its direct mapping address, isn't it?
Do you know why cma area can't be mapped into vmalloc?
Should not the kernel direct mapping be used?
When image->segment_cma[i] has value, image->ima_buffer_addr also contains the physical address of the cma area, why cma physical address can't be mapped into vmalloc and cause the failure and call trace?
On Fri, Nov 7, 2025 at 9:51 AM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 06:01pm, Pingfan Liu wrote:
On Thu, Nov 6, 2025 at 4:01 PM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 02:59pm, Pingfan Liu wrote:
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Well, you didn't update the log accordingly.
I am not sure what you mean. Do you mean the earlier content in the thread, to which I replied?
No. In v1, you return cma directly. But in v2, you return its direct mapping address, isn't it?
Yes. But I think it is a fault in the code, which does not convey the expression in the commit log. Do you think I should rephrase the words "the CMA kernel address" as "the CMA kernel direct mapping address"?
Do you know why cma area can't be mapped into vmalloc?
Should not the kernel direct mapping be used?
When image->segment_cma[i] has value, image->ima_buffer_addr also contains the physical address of the cma area, why cma physical address can't be mapped into vmalloc and cause the failure and call trace?
It could be done using the vmalloc approach, but it's unnecessary. IIUC, kimage_map_segment() was introduced to provide a contiguous virtual address for IMA access, since the IND_SRC pages are scattered throughout the kernel. However, in the CMA case, there is already a contiguous virtual address in the kernel direct mapping range. Normally, when we have a physical address, we simply use phys_to_virt() to get its corresponding kernel virtual address.
Thanks,
Pingfan
On 11/07/25 at 01:13pm, Pingfan Liu wrote:
On Fri, Nov 7, 2025 at 9:51 AM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 06:01pm, Pingfan Liu wrote:
On Thu, Nov 6, 2025 at 4:01 PM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 02:59pm, Pingfan Liu wrote:
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Well, you didn't update the log accordingly.
I am not sure what you mean. Do you mean the earlier content in the thread, to which I replied?
No. In v1, you return cma directly. But in v2, you return its direct mapping address, isn't it?
Yes. But I think it is a fault in the code, which does not convey the expression in the commit log. Do you think I should rephrase the words "the CMA kernel address" as "the CMA kernel direct mapping address"?
That's fine to me.
Do you know why cma area can't be mapped into vmalloc?
Should not the kernel direct mapping be used?
When image->segment_cma[i] has value, image->ima_buffer_addr also contains the physical address of the cma area, why cma physical address can't be mapped into vmalloc and cause the failure and call trace?
It could be done using the vmalloc approach, but it's unnecessary. IIUC, kimage_map_segment() was introduced to provide a contiguous virtual address for IMA access, since the IND_SRC pages are scattered throughout the kernel. However, in the CMA case, there is already a contiguous virtual address in the kernel direct mapping range. Normally, when we have a physical address, we simply use phys_to_virt() to get its corresponding kernel virtual address.
OK, I understand the cma area is contiguous, and there is no need to map it into vmalloc. I am wondering why, in the old code, mapping the cma address into vmalloc causes the warning, which you said is an IMA problem.
On Fri, Nov 07, 2025 at 01:25:41PM +0800, Baoquan He wrote:
On 11/07/25 at 01:13pm, Pingfan Liu wrote:
On Fri, Nov 7, 2025 at 9:51 AM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 06:01pm, Pingfan Liu wrote:
On Thu, Nov 6, 2025 at 4:01 PM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 02:59pm, Pingfan Liu wrote:
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Well, you didn't update the log accordingly.
I am not sure what you mean. Do you mean the earlier content in the thread, to which I replied?
No. In v1, you return cma directly. But in v2, you return its direct mapping address, isn't it?
Yes. But I think it is a fault in the code, which does not convey the expression in the commit log. Do you think I should rephrase the words "the CMA kernel address" as "the CMA kernel direct mapping address"?
That's fine to me.
Do you know why cma area can't be mapped into vmalloc?
Should not the kernel direct mapping be used?
When image->segment_cma[i] has value, image->ima_buffer_addr also contains the physical address of the cma area, why cma physical address can't be mapped into vmalloc and cause the failure and call trace?
It could be done using the vmalloc approach, but it's unnecessary. IIUC, kimage_map_segment() was introduced to provide a contiguous virtual address for IMA access, since the IND_SRC pages are scattered throughout the kernel. However, in the CMA case, there is already a contiguous virtual address in the kernel direct mapping range. Normally, when we have a physical address, we simply use phys_to_virt() to get its corresponding kernel virtual address.
OK, I understand the cma area is contiguous, and there is no need to map it into vmalloc. I am wondering why, in the old code, mapping the cma address into vmalloc causes the warning, which you said is an IMA problem.
It doesn't go that far. The old code doesn't map CMA into vmalloc'd area.
void *kimage_map_segment(struct kimage *image, int idx) { ... for_each_kimage_entry(image, ptr, entry) { if (entry & IND_DESTINATION) { dest_page_addr = entry & PAGE_MASK; } else if (entry & IND_SOURCE) { if (dest_page_addr >= addr && dest_page_addr < eaddr) { src_page_addr = entry & PAGE_MASK; src_pages[i++] = virt_to_page(__va(src_page_addr)); if (i == npages) break; dest_page_addr += PAGE_SIZE; } } }
/* Sanity check. */ WARN_ON(i < npages); //--> This is the warning thrown by kernel
vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL); kfree(src_pages);
if (!vaddr) pr_err("Could not map ima buffer.\n");
return vaddr; }
When CMA is used, there is no IND_SOURCE, so we have i=0 < npages. Now, I see how my words ("In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.") confused you. As for "instead of using the vmalloc'd address", I meant to mention "vmap()" approach.
Best Regards,
Pingfan
On 11/07/25 at 05:00pm, Pingfan Liu wrote:
On Fri, Nov 07, 2025 at 01:25:41PM +0800, Baoquan He wrote:
On 11/07/25 at 01:13pm, Pingfan Liu wrote:
On Fri, Nov 7, 2025 at 9:51 AM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 06:01pm, Pingfan Liu wrote:
On Thu, Nov 6, 2025 at 4:01 PM Baoquan He bhe@redhat.com wrote:
On 11/06/25 at 02:59pm, Pingfan Liu wrote: > When I tested kexec with the latest kernel, I ran into the following warning: > > [ 40.712410] ------------[ cut here ]------------ > [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 > [...] > [ 40.816047] Call trace: > [ 40.818498] kimage_map_segment+0x144/0x198 (P) > [ 40.823221] ima_kexec_post_load+0x58/0xc0 > [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 > [...] > [ 40.855423] ---[ end trace 0000000000000000 ]--- > > This is caused by the fact that kexec allocates the destination directly > in the CMA area. In that case, the CMA kernel address should be exported > directly to the IMA component, instead of using the vmalloc'd address.
Well, you didn't update the log accordingly.
I am not sure what you mean. Do you mean the earlier content in the thread, to which I replied?
No. In v1, you return cma directly. But in v2, you return its direct mapping address, isn't it?
Yes. But I think it is a fault in the code, which does not convey the expression in the commit log. Do you think I should rephrase the words "the CMA kernel address" as "the CMA kernel direct mapping address"?
That's fine to me.
Do you know why cma area can't be mapped into vmalloc?
Should not the kernel direct mapping be used?
When image->segment_cma[i] has value, image->ima_buffer_addr also contains the physical address of the cma area, why cma physical address can't be mapped into vmalloc and cause the failure and call trace?
It could be done using the vmalloc approach, but it's unnecessary. IIUC, kimage_map_segment() was introduced to provide a contiguous virtual address for IMA access, since the IND_SRC pages are scattered throughout the kernel. However, in the CMA case, there is already a contiguous virtual address in the kernel direct mapping range. Normally, when we have a physical address, we simply use phys_to_virt() to get its corresponding kernel virtual address.
OK, I understand the cma area is contiguous, and there is no need to map it into vmalloc. I am wondering why, in the old code, mapping the cma address into vmalloc causes the warning, which you said is an IMA problem.
It doesn't go that far. The old code doesn't map CMA into vmalloc'd area.
void *kimage_map_segment(struct kimage *image, int idx) { ... for_each_kimage_entry(image, ptr, entry) { if (entry & IND_DESTINATION) { dest_page_addr = entry & PAGE_MASK; } else if (entry & IND_SOURCE) { if (dest_page_addr >= addr && dest_page_addr < eaddr) { src_page_addr = entry & PAGE_MASK; src_pages[i++] = virt_to_page(__va(src_page_addr)); if (i == npages) break; dest_page_addr += PAGE_SIZE; } } }
	/* Sanity check. */
	WARN_ON(i < npages); //--> This is the warning thrown by kernel

	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
	kfree(src_pages);

	if (!vaddr)
		pr_err("Could not map ima buffer.\n");

	return vaddr;
}
When CMA is used, there is no IND_SOURCE, so we have i=0 < npages. Now, I see how my words ("In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.") confused you. As for "instead of using the vmalloc'd address", I meant to mention "vmap()" approach.
Ok, I got it. It's truly a bug because if image->segment_cma[idx] is valid, the current kimage_map_segment() can't collect the source pages at all since they are not marked with IND_DESTINATION|IND_SOURCE as normal segment does. In that situation, we can take the direct mapping address of image->segment_cma[idx] which is more efficient, instead of collecting source pages and vmap().
On 11/06/25 at 02:59pm, Pingfan Liu wrote:
When I tested kexec with the latest kernel, I ran into the following warning:
[ 40.712410] ------------[ cut here ]------------ [ 40.712576] WARNING: CPU: 2 PID: 1562 at kernel/kexec_core.c:1001 kimage_map_segment+0x144/0x198 [...] [ 40.816047] Call trace: [ 40.818498] kimage_map_segment+0x144/0x198 (P) [ 40.823221] ima_kexec_post_load+0x58/0xc0 [ 40.827246] __do_sys_kexec_file_load+0x29c/0x368 [...] [ 40.855423] ---[ end trace 0000000000000000 ]---
This is caused by the fact that kexec allocates the destination directly in the CMA area. In that case, the CMA kernel address should be exported directly to the IMA component, instead of using the vmalloc'd address.
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation") Signed-off-by: Pingfan Liu piliu@redhat.com Cc: Andrew Morton akpm@linux-foundation.org Cc: Baoquan He bhe@redhat.com Cc: Alexander Graf graf@amazon.com Cc: Steven Chen chenste@linux.microsoft.com Cc: linux-integrity@vger.kernel.org Cc: stable@vger.kernel.org To: kexec@lists.infradead.org
v1 -> v2: return page_address(page) instead of *page
kernel/kexec_core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index 9a1966207041..332204204e53 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -967,6 +967,7 @@ void *kimage_map_segment(struct kimage *image, int idx) kimage_entry_t *ptr, entry; struct page **src_pages; unsigned int npages;
- struct page *cma; void *vaddr = NULL; int i;
@@ -974,6 +975,9 @@ void *kimage_map_segment(struct kimage *image, int idx) size = image->segment[idx].memsz; eaddr = addr + size;
- cma = image->segment_cma[idx];
- if (cma)
return page_address(cma);
This judgement can be put above the addr/size/eaddr assignment lines?
If you agree, maybe you can update the patch log by adding more details to explain the root cause so that people can understand it easier.
/* * Collect the source pages and map them in a contiguous VA range. */ @@ -1014,7 +1018,8 @@ void *kimage_map_segment(struct kimage *image, int idx) void kimage_unmap_segment(void *segment_buffer) {
- vunmap(segment_buffer);
- if (is_vmalloc_addr(segment_buffer))
vunmap(segment_buffer);} struct kexec_load_limit { -- 2.49.0
linux-stable-mirror@lists.linaro.org