
kernel/kexec: change the prototype of kimage_map_segment()

The kexec segment index will be needed inside kimage_map_segment() to look
up the corresponding information for that segment.  In addition, struct
kexec_segment already holds the kexec relocation destination address and
size.  The prototype of kimage_map_segment() can therefore be changed to
take the segment index instead of an address/size pair.
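
For illustration only, a minimal sketch (not part of the patch) of the
caller-side change, using the IMA fields that appear in the diff below:

	/* Before: the destination address and size were passed explicitly. */
	ima_kexec_buffer = kimage_map_segment(image, image->ima_buffer_addr,
					      image->ima_buffer_size);

	/*
	 * After: only the segment index is passed; kimage_map_segment()
	 * reads image->segment[idx].mem and image->segment[idx].memsz itself.
	 */
	ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index);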

Link: https://lkml.kernel.org/r/20251216014852.8737-1-piliu@redhat.com
Fixes: 07d24902977e ("kexec: enable CMA based contiguous allocation")
Signed-off-by: Pingfan Liu <piliu@redhat.com>
Acked-by: Baoquan He <bhe@redhat.com>
Cc: Mimi Zohar <zohar@linux.ibm.com>
Cc: Roberto Sassu <roberto.sassu@huawei.com>
Cc: Alexander Graf <graf@amazon.com>
Cc: Steven Chen <chenste@linux.microsoft.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit fe55ea8593 (parent 8de524774b)
Author: Pingfan Liu <piliu@redhat.com>
Date:   2025-12-16 09:48:51 +08:00
Committer: Andrew Morton <akpm@linux-foundation.org>

 3 files changed, 9 insertions(+), 8 deletions(-)

include/linux/kexec.h

@@ -530,7 +530,7 @@ extern bool kexec_file_dbg_print;
 #define kexec_dprintk(fmt, arg...) \
 	do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
 
-extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void *kimage_map_segment(struct kimage *image, int idx);
 extern void kimage_unmap_segment(void *buffer);
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
@@ -540,7 +540,7 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 static inline int kexec_crash_loaded(void) { return 0; }
-static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+static inline void *kimage_map_segment(struct kimage *image, int idx)
 { return NULL; }
 static inline void kimage_unmap_segment(void *buffer) { }
 #define kexec_in_progress false

kernel/kexec_core.c

@@ -953,17 +953,20 @@ int kimage_load_segment(struct kimage *image, int idx)
 	return result;
 }
 
-void *kimage_map_segment(struct kimage *image,
-			 unsigned long addr, unsigned long size)
+void *kimage_map_segment(struct kimage *image, int idx)
 {
+	unsigned long addr, size, eaddr;
 	unsigned long src_page_addr, dest_page_addr = 0;
-	unsigned long eaddr = addr + size;
 	kimage_entry_t *ptr, entry;
 	struct page **src_pages;
 	unsigned int npages;
 	void *vaddr = NULL;
 	int i;
 
+	addr = image->segment[idx].mem;
+	size = image->segment[idx].memsz;
+	eaddr = addr + size;
+
 	/*
 	 * Collect the source pages and map them in a contiguous VA range.
 	 */

security/integrity/ima/ima_kexec.c

@@ -250,9 +250,7 @@ void ima_kexec_post_load(struct kimage *image)
 	if (!image->ima_buffer_addr)
 		return;
 
-	ima_kexec_buffer = kimage_map_segment(image,
-					      image->ima_buffer_addr,
-					      image->ima_buffer_size);
+	ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index);
 	if (!ima_kexec_buffer) {
 		pr_err("Could not map measurements buffer.\n");
 		return;