mirror of
https://github.com/adulau/aha.git
synced 2024-12-27 11:16:11 +00:00
[PATCH] kexec code cleanup
o Following patch provides purely cosmetic changes and corrects CodingStyle
  issues like the ones below in the kexec related files:
  o braces for one-line "if" statements and "for" loops,
  o lines more than 80 columns wide,
  o no space after the "while", "for" and "switch" keywords.

o Changes:
  o take-2: Removed the extra tab before "case" keywords.
  o take-3: Put operators at the end of the line and a space before "*/".

Signed-off-by: Maneesh Soni <maneesh@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 72414d3f1d
parent 4f339ecb30
10 changed files with 243 additions and 211 deletions
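To make the style rules above concrete, here is a minimal, made-up before/after sketch of the transformations the diff below performs throughout; the function and constant names are illustrative assumptions, not code from the patch:

#define EXAMPLE_LIMIT 4096UL    /* hypothetical bound, for illustration only */

/* Before: braces around one-line bodies, no space after the "while"
 * keyword, and an over-80-column expression on a single line. */
static unsigned long example_before(unsigned long addr, unsigned long pfn)
{
        unsigned long end = (pfn << 12) + EXAMPLE_LIMIT + EXAMPLE_LIMIT + EXAMPLE_LIMIT;

        while(addr < end) {
                addr += 64;
        }
        if (addr > end) {
                addr = end;
        }
        return addr;
}

/* After: keyword spacing fixed, needless braces dropped, and the long
 * expression wrapped with the operator kept at the end of the line. */
static unsigned long example_after(unsigned long addr, unsigned long pfn)
{
        unsigned long end = (pfn << 12) + EXAMPLE_LIMIT + EXAMPLE_LIMIT +
                                EXAMPLE_LIMIT;

        while (addr < end)
                addr += 64;
        if (addr > end)
                addr = end;

        return addr;
}

The behavior of both versions is identical; every hunk below is a whitespace or brace change of exactly this kind.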
arch/i386/kernel/crash.c

@@ -31,10 +31,11 @@ note_buf_t crash_notes[NR_CPUS];
 /* This keeps a track of which one is crashing cpu. */
 static int crashing_cpu;
 
-static u32 *append_elf_note(u32 *buf,
-        char *name, unsigned type, void *data, size_t data_len)
+static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
+                            size_t data_len)
 {
         struct elf_note note;
+
         note.n_namesz = strlen(name) + 1;
         note.n_descsz = data_len;
         note.n_type = type;
@@ -44,26 +45,28 @@ static u32 *append_elf_note(u32 *buf,
         buf += (note.n_namesz + 3)/4;
         memcpy(buf, data, note.n_descsz);
         buf += (note.n_descsz + 3)/4;
+
         return buf;
 }
 
 static void final_note(u32 *buf)
 {
         struct elf_note note;
+
         note.n_namesz = 0;
         note.n_descsz = 0;
         note.n_type = 0;
         memcpy(buf, &note, sizeof(note));
 }
 
-
 static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 {
         struct elf_prstatus prstatus;
         u32 *buf;
-        if ((cpu < 0) || (cpu >= NR_CPUS)) {
+
+        if ((cpu < 0) || (cpu >= NR_CPUS))
                 return;
-        }
+
         /* Using ELF notes here is opportunistic.
          * I need a well defined structure format
          * for the data I pass, and I need tags
@@ -75,9 +78,8 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
         memset(&prstatus, 0, sizeof(prstatus));
         prstatus.pr_pid = current->pid;
         elf_core_copy_regs(&prstatus.pr_reg, regs);
-        buf = append_elf_note(buf, "CORE", NT_PRSTATUS,
-                        &prstatus, sizeof(prstatus));
-
+        buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus,
+                              sizeof(prstatus));
         final_note(buf);
 }
 
@@ -119,8 +121,8 @@ static void crash_save_self(struct pt_regs *saved_regs)
 {
         struct pt_regs regs;
         int cpu;
-        cpu = smp_processor_id();
 
+        cpu = smp_processor_id();
         if (saved_regs)
                 crash_setup_regs(&regs, saved_regs);
         else
@@ -153,6 +155,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
         /* Assume hlt works */
         __asm__("hlt");
         for(;;);
+
         return 1;
 }
 
@@ -169,8 +172,8 @@ static void smp_send_nmi_allbutself(void)
 static void nmi_shootdown_cpus(void)
 {
         unsigned long msecs;
-        atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
 
+        atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
         /* Would it be better to replace the trap vector here? */
         set_nmi_callback(crash_nmi_callback);
         /* Ensure the new callback function is set before sending

arch/i386/kernel/machine_kexec.c
@@ -80,7 +80,8 @@ static void identity_map_page(unsigned long address)
         /* Identity map the page table entry */
         pgtable_level1[level1_index] = address | L0_ATTR;
         pgtable_level2[level2_index] = __pa(pgtable_level1) | L1_ATTR;
-        set_64bit(&pgtable_level3[level3_index], __pa(pgtable_level2) | L2_ATTR);
+        set_64bit(&pgtable_level3[level3_index],
+                  __pa(pgtable_level2) | L2_ATTR);
 
         /* Flush the tlb so the new mapping takes effect.
          * Global tlb entries are not flushed but that is not an issue.
@@ -139,8 +140,10 @@ static void load_segments(void)
 }
 
 typedef asmlinkage NORET_TYPE void (*relocate_new_kernel_t)(
-        unsigned long indirection_page, unsigned long reboot_code_buffer,
-        unsigned long start_address, unsigned int has_pae) ATTRIB_NORET;
+                                        unsigned long indirection_page,
+                                        unsigned long reboot_code_buffer,
+                                        unsigned long start_address,
+                                        unsigned int has_pae) ATTRIB_NORET;
 
 const extern unsigned char relocate_new_kernel[];
 extern void relocate_new_kernel_end(void);
@@ -180,20 +183,23 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 {
         unsigned long page_list;
         unsigned long reboot_code_buffer;
+
         relocate_new_kernel_t rnk;
 
         /* Interrupts aren't acceptable while we reboot */
         local_irq_disable();
 
         /* Compute some offsets */
-        reboot_code_buffer = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+        reboot_code_buffer = page_to_pfn(image->control_code_page)
+                                                                << PAGE_SHIFT;
         page_list = image->head;
 
         /* Set up an identity mapping for the reboot_code_buffer */
         identity_map_page(reboot_code_buffer);
 
         /* copy it out */
-        memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
+        memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+                                                relocate_new_kernel_size);
 
         /* The segment registers are funny things, they are
          * automatically loaded from a table, in memory wherever you

arch/ppc/kernel/machine_kexec.c
@@ -21,24 +21,23 @@
 #include <asm/machdep.h>
 
 typedef NORET_TYPE void (*relocate_new_kernel_t)(
-        unsigned long indirection_page, unsigned long reboot_code_buffer,
-        unsigned long start_address) ATTRIB_NORET;
+                                unsigned long indirection_page,
+                                unsigned long reboot_code_buffer,
+                                unsigned long start_address) ATTRIB_NORET;
 
 const extern unsigned char relocate_new_kernel[];
 const extern unsigned int relocate_new_kernel_size;
 
 void machine_shutdown(void)
 {
-        if (ppc_md.machine_shutdown) {
+        if (ppc_md.machine_shutdown)
                 ppc_md.machine_shutdown();
-        }
 }
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-        if (ppc_md.machine_crash_shutdown) {
+        if (ppc_md.machine_crash_shutdown)
                 ppc_md.machine_crash_shutdown();
-        }
 }
 
 /*
@@ -48,9 +47,8 @@ void machine_crash_shutdown(struct pt_regs *regs)
  */
 int machine_kexec_prepare(struct kimage *image)
 {
-        if (ppc_md.machine_kexec_prepare) {
+        if (ppc_md.machine_kexec_prepare)
                 return ppc_md.machine_kexec_prepare(image);
-        }
         /*
          * Fail if platform doesn't provide its own machine_kexec_prepare
          * implementation.
@@ -60,9 +58,8 @@ int machine_kexec_prepare(struct kimage *image)
 
 void machine_kexec_cleanup(struct kimage *image)
 {
-        if (ppc_md.machine_kexec_cleanup) {
+        if (ppc_md.machine_kexec_cleanup)
                 ppc_md.machine_kexec_cleanup(image);
-        }
 }
 
 /*
@@ -71,9 +68,9 @@ void machine_kexec_cleanup(struct kimage *image)
  */
 NORET_TYPE void machine_kexec(struct kimage *image)
 {
-        if (ppc_md.machine_kexec) {
+        if (ppc_md.machine_kexec)
                 ppc_md.machine_kexec(image);
-        } else {
+        else {
                 /*
                  * Fall back to normal restart if platform doesn't provide
                  * its own kexec function, and user insist to kexec...
@@ -83,7 +80,6 @@ NORET_TYPE void machine_kexec(struct kimage *image)
         for(;;);
 }
 
-
 /*
  * This is a generic machine_kexec function suitable at least for
  * non-OpenFirmware embedded platforms.
@@ -104,15 +100,15 @@ void machine_kexec_simple(struct kimage *image)
 
         /* we need both effective and real address here */
         reboot_code_buffer =
-        (unsigned long)page_address(image->control_code_page);
+                        (unsigned long)page_address(image->control_code_page);
         reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer);
 
         /* copy our kernel relocation code to the control code page */
-        memcpy((void *)reboot_code_buffer,
-                relocate_new_kernel, relocate_new_kernel_size);
+        memcpy((void *)reboot_code_buffer, relocate_new_kernel,
+                                                relocate_new_kernel_size);
 
         flush_icache_range(reboot_code_buffer,
-                reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
+                        reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
         printk(KERN_INFO "Bye!\n");
 
         /* now call it */

arch/ppc64/kernel/machine_kexec.c
@@ -58,7 +58,7 @@ int machine_kexec_prepare(struct kimage *image)
          * handle the virtual mode, we must make sure no destination
          * overlaps kernel static data or bss.
          */
-        for(i = 0; i < image->nr_segments; i++)
+        for (i = 0; i < image->nr_segments; i++)
                 if (image->segment[i].mem < __pa(_end))
                         return -ETXTBSY;
 
@@ -76,7 +76,7 @@ int machine_kexec_prepare(struct kimage *image)
                 low = __pa(htab_address);
                 high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE;
 
-                for(i = 0; i < image->nr_segments; i++) {
+                for (i = 0; i < image->nr_segments; i++) {
                         begin = image->segment[i].mem;
                         end = begin + image->segment[i].memsz;
 
@@ -98,7 +98,7 @@ int machine_kexec_prepare(struct kimage *image)
                 low = *basep;
                 high = low + (*sizep);
 
-                for(i = 0; i < image->nr_segments; i++) {
+                for (i = 0; i < image->nr_segments; i++) {
                         begin = image->segment[i].mem;
                         end = begin + image->segment[i].memsz;
 
@@ -274,7 +274,8 @@ union thread_union kexec_stack
 
 /* Our assembly helper, in kexec_stub.S */
 extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start,
-        void *image, void *control, void (*clear_all)(void)) ATTRIB_NORET;
+                                        void *image, void *control,
+                                        void (*clear_all)(void)) ATTRIB_NORET;
 
 /* too late to fail here */
 void machine_kexec(struct kimage *image)

arch/s390/kernel/machine_kexec.c
@@ -67,7 +67,7 @@ machine_kexec(struct kimage *image)
         ctl_clear_bit(0,28);
 
         on_each_cpu(kexec_halt_all_cpus, image, 0, 0);
-        for(;;);
+        for (;;);
 }
 
 static void
@@ -85,7 +85,7 @@ kexec_halt_all_cpus(void *kernel_image)
         for_each_online_cpu(cpu) {
                 if (cpu == smp_processor_id())
                         continue;
-                while(!smp_cpu_not_running(cpu))
+                while (!smp_cpu_not_running(cpu))
                         cpu_relax();
         }
 

arch/x86_64/kernel/machine_kexec.c
@@ -32,29 +32,31 @@
 #define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 
-static void init_level2_page(
-        u64 *level2p, unsigned long addr)
+static void init_level2_page(u64 *level2p, unsigned long addr)
 {
         unsigned long end_addr;
+
         addr &= PAGE_MASK;
         end_addr = addr + LEVEL2_SIZE;
-        while(addr < end_addr) {
+        while (addr < end_addr) {
                 *(level2p++) = addr | L1_ATTR;
                 addr += LEVEL1_SIZE;
         }
 }
 
-static int init_level3_page(struct kimage *image,
-        u64 *level3p, unsigned long addr, unsigned long last_addr)
+static int init_level3_page(struct kimage *image, u64 *level3p,
+                                unsigned long addr, unsigned long last_addr)
 {
         unsigned long end_addr;
         int result;
+
         result = 0;
         addr &= PAGE_MASK;
         end_addr = addr + LEVEL3_SIZE;
-        while((addr < last_addr) && (addr < end_addr)) {
+        while ((addr < last_addr) && (addr < end_addr)) {
                 struct page *page;
                 u64 *level2p;
+
                 page = kimage_alloc_control_pages(image, 0);
                 if (!page) {
                         result = -ENOMEM;
@@ -66,7 +68,7 @@ static int init_level3_page(struct kimage *image,
                 addr += LEVEL2_SIZE;
         }
         /* clear the unused entries */
-        while(addr < end_addr) {
+        while (addr < end_addr) {
                 *(level3p++) = 0;
                 addr += LEVEL2_SIZE;
         }
@@ -75,17 +77,19 @@ out:
 }
 
 
-static int init_level4_page(struct kimage *image,
-        u64 *level4p, unsigned long addr, unsigned long last_addr)
+static int init_level4_page(struct kimage *image, u64 *level4p,
+                                unsigned long addr, unsigned long last_addr)
 {
         unsigned long end_addr;
         int result;
+
         result = 0;
         addr &= PAGE_MASK;
         end_addr = addr + LEVEL4_SIZE;
-        while((addr < last_addr) && (addr < end_addr)) {
+        while ((addr < last_addr) && (addr < end_addr)) {
                 struct page *page;
                 u64 *level3p;
+
                 page = kimage_alloc_control_pages(image, 0);
                 if (!page) {
                         result = -ENOMEM;
@@ -100,11 +104,11 @@ static int init_level4_page(struct kimage *image,
                 addr += LEVEL3_SIZE;
         }
         /* clear the unused entries */
-        while(addr < end_addr) {
+        while (addr < end_addr) {
                 *(level4p++) = 0;
                 addr += LEVEL3_SIZE;
         }
- out:
+out:
         return result;
 }
 
@@ -113,7 +117,7 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
         u64 *level4p;
         level4p = (u64 *)__va(start_pgtable);
-        return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
+        return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
 }
 
 static void set_idt(void *newidt, u16 limit)
@@ -159,9 +163,10 @@ static void load_segments(void)
 #undef __STR
 }
 
-typedef NORET_TYPE void (*relocate_new_kernel_t)(
-        unsigned long indirection_page, unsigned long control_code_buffer,
-        unsigned long start_address, unsigned long pgtable) ATTRIB_NORET;
+typedef NORET_TYPE void (*relocate_new_kernel_t)(unsigned long indirection_page,
+                                        unsigned long control_code_buffer,
+                                        unsigned long start_address,
+                                        unsigned long pgtable) ATTRIB_NORET;
 
 const extern unsigned char relocate_new_kernel[];
 const extern unsigned long relocate_new_kernel_size;
@@ -172,17 +177,17 @@ int machine_kexec_prepare(struct kimage *image)
         int result;
 
         /* Calculate the offsets */
-        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
         control_code_buffer = start_pgtable + 4096UL;
 
         /* Setup the identity mapped 64bit page table */
         result = init_pgtable(image, start_pgtable);
-        if (result) {
+        if (result)
                 return result;
-        }
 
         /* Place the code in the reboot code buffer */
-        memcpy(__va(control_code_buffer), relocate_new_kernel, relocate_new_kernel_size);
+        memcpy(__va(control_code_buffer), relocate_new_kernel,
+                                                relocate_new_kernel_size);
 
         return 0;
 }
@@ -207,8 +212,8 @@ NORET_TYPE void machine_kexec(struct kimage *image)
         local_irq_disable();
 
         /* Calculate the offsets */
-        page_list = image->head;
-        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
+        page_list = image->head;
+        start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
         control_code_buffer = start_pgtable + 4096UL;
 
         /* Set the low half of the page table to my identity mapped

drivers/char/mem.c
@@ -287,7 +287,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
         size_t read = 0, csize;
         int rc = 0;
 
-        while(count) {
+        while (count) {
                 pfn = *ppos / PAGE_SIZE;
                 if (pfn > saved_max_pfn)
                         return read;

include/linux/kexec.h
@@ -91,14 +91,17 @@ extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
 extern int machine_kexec_prepare(struct kimage *image);
 extern void machine_kexec_cleanup(struct kimage *image);
 extern asmlinkage long sys_kexec_load(unsigned long entry,
-        unsigned long nr_segments, struct kexec_segment __user *segments,
-        unsigned long flags);
+                                        unsigned long nr_segments,
+                                        struct kexec_segment __user *segments,
+                                        unsigned long flags);
 #ifdef CONFIG_COMPAT
 extern asmlinkage long compat_sys_kexec_load(unsigned long entry,
-        unsigned long nr_segments, struct compat_kexec_segment __user *segments,
-        unsigned long flags);
+                                unsigned long nr_segments,
+                                struct compat_kexec_segment __user *segments,
+                                unsigned long flags);
 #endif
-extern struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order);
+extern struct page *kimage_alloc_control_pages(struct kimage *image,
+                                                unsigned int order);
 extern void crash_kexec(struct pt_regs *);
 int kexec_should_crash(struct task_struct *);
 extern struct kimage *kexec_image;

include/linux/syscalls.h
@@ -159,9 +159,9 @@ asmlinkage long sys_shutdown(int, int);
 asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd,
                                 void __user *arg);
 asmlinkage long sys_restart_syscall(void);
-asmlinkage long sys_kexec_load(unsigned long entry,
-        unsigned long nr_segments, struct kexec_segment __user *segments,
-        unsigned long flags);
+asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+                                struct kexec_segment __user *segments,
+                                unsigned long flags);
 
 asmlinkage long sys_exit(int error_code);
 asmlinkage void sys_exit_group(int error_code);

kernel/kexec.c (302 changes)
@@ -87,12 +87,15 @@ int kexec_should_crash(struct task_struct *p)
  */
 #define KIMAGE_NO_DEST (-1UL)
 
-static int kimage_is_destination_range(
-        struct kimage *image, unsigned long start, unsigned long end);
-static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long dest);
+static int kimage_is_destination_range(struct kimage *image,
+                                        unsigned long start, unsigned long end);
+static struct page *kimage_alloc_page(struct kimage *image,
+                                        unsigned int gfp_mask,
+                                        unsigned long dest);
 
 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
-        unsigned long nr_segments, struct kexec_segment __user *segments)
+                           unsigned long nr_segments,
+                           struct kexec_segment __user *segments)
 {
         size_t segment_bytes;
         struct kimage *image;
@@ -102,9 +105,9 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
         /* Allocate a controlling structure */
         result = -ENOMEM;
         image = kmalloc(sizeof(*image), GFP_KERNEL);
-        if (!image) {
+        if (!image)
                 goto out;
-        }
+
         memset(image, 0, sizeof(*image));
         image->head = 0;
         image->entry = &image->head;
@@ -145,6 +148,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
         result = -EADDRNOTAVAIL;
         for (i = 0; i < nr_segments; i++) {
                 unsigned long mstart, mend;
+
                 mstart = image->segment[i].mem;
                 mend = mstart + image->segment[i].memsz;
                 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
@@ -159,12 +163,13 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
          * easy explanation as one segment stops on another.
          */
         result = -EINVAL;
-        for(i = 0; i < nr_segments; i++) {
+        for (i = 0; i < nr_segments; i++) {
                 unsigned long mstart, mend;
                 unsigned long j;
+
                 mstart = image->segment[i].mem;
                 mend = mstart + image->segment[i].memsz;
-                for(j = 0; j < i; j++) {
+                for (j = 0; j < i; j++) {
                         unsigned long pstart, pend;
                         pstart = image->segment[j].mem;
                         pend = pstart + image->segment[j].memsz;
@@ -180,25 +185,25 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
          * later on.
          */
         result = -EINVAL;
-        for(i = 0; i < nr_segments; i++) {
+        for (i = 0; i < nr_segments; i++) {
                 if (image->segment[i].bufsz > image->segment[i].memsz)
                         goto out;
         }
 
 
         result = 0;
- out:
-        if (result == 0) {
+out:
+        if (result == 0)
                 *rimage = image;
-        } else {
+        else
                 kfree(image);
-        }
         return result;
 
 }
 
 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
-        unsigned long nr_segments, struct kexec_segment __user *segments)
+                                unsigned long nr_segments,
+                                struct kexec_segment __user *segments)
 {
         int result;
         struct kimage *image;
@@ -206,9 +211,9 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
         /* Allocate and initialize a controlling structure */
         image = NULL;
         result = do_kimage_alloc(&image, entry, nr_segments, segments);
-        if (result) {
+        if (result)
                 goto out;
-        }
+
         *rimage = image;
 
         /*
@@ -218,7 +223,7 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
          */
         result = -ENOMEM;
         image->control_code_page = kimage_alloc_control_pages(image,
-                                get_order(KEXEC_CONTROL_CODE_SIZE));
+                                   get_order(KEXEC_CONTROL_CODE_SIZE));
         if (!image->control_code_page) {
                 printk(KERN_ERR "Could not allocate control_code_buffer\n");
                 goto out;
@@ -226,16 +231,17 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 
         result = 0;
 out:
-        if (result == 0) {
+        if (result == 0)
                 *rimage = image;
-        } else {
+        else
                 kfree(image);
-        }
+
         return result;
 }
 
 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
-        unsigned long nr_segments, struct kexec_segment *segments)
+                                unsigned long nr_segments,
+                                struct kexec_segment *segments)
 {
         int result;
         struct kimage *image;
@@ -250,9 +256,8 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 
         /* Allocate and initialize a controlling structure */
         result = do_kimage_alloc(&image, entry, nr_segments, segments);
-        if (result) {
+        if (result)
                 goto out;
-        }
 
         /* Enable the special crash kernel control page
          * allocation policy.
@@ -272,6 +277,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
         result = -EADDRNOTAVAIL;
         for (i = 0; i < nr_segments; i++) {
                 unsigned long mstart, mend;
+
                 mstart = image->segment[i].mem;
                 mend = mstart + image->segment[i].memsz - 1;
                 /* Ensure we are within the crash kernel limits */
@@ -279,7 +285,6 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
                         goto out;
         }
 
-
         /*
          * Find a location for the control code buffer, and add
         * the vector of segments so that it's pages will also be
@@ -287,80 +292,84 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
          */
         result = -ENOMEM;
         image->control_code_page = kimage_alloc_control_pages(image,
-                                get_order(KEXEC_CONTROL_CODE_SIZE));
+                                   get_order(KEXEC_CONTROL_CODE_SIZE));
         if (!image->control_code_page) {
                 printk(KERN_ERR "Could not allocate control_code_buffer\n");
                 goto out;
         }
 
         result = 0;
- out:
-        if (result == 0) {
+out:
+        if (result == 0)
                 *rimage = image;
-        } else {
+        else
                 kfree(image);
-        }
+
         return result;
 }
 
-static int kimage_is_destination_range(
-        struct kimage *image, unsigned long start, unsigned long end)
+static int kimage_is_destination_range(struct kimage *image,
+                                        unsigned long start,
+                                        unsigned long end)
 {
         unsigned long i;
 
         for (i = 0; i < image->nr_segments; i++) {
                 unsigned long mstart, mend;
+
                 mstart = image->segment[i].mem;
-                mend = mstart + image->segment[i].memsz;
-                if ((end > mstart) && (start < mend)) {
+                mend = mstart + image->segment[i].memsz;
+                if ((end > mstart) && (start < mend))
                         return 1;
-                }
         }
 
         return 0;
 }
 
-static struct page *kimage_alloc_pages(unsigned int gfp_mask, unsigned int order)
+static struct page *kimage_alloc_pages(unsigned int gfp_mask,
+                                        unsigned int order)
 {
         struct page *pages;
 
         pages = alloc_pages(gfp_mask, order);
         if (pages) {
                 unsigned int count, i;
                 pages->mapping = NULL;
                 pages->private = order;
                 count = 1 << order;
-                for(i = 0; i < count; i++) {
+                for (i = 0; i < count; i++)
                         SetPageReserved(pages + i);
-                }
         }
 
         return pages;
 }
 
 static void kimage_free_pages(struct page *page)
 {
         unsigned int order, count, i;
 
         order = page->private;
         count = 1 << order;
-        for(i = 0; i < count; i++) {
+        for (i = 0; i < count; i++)
                 ClearPageReserved(page + i);
-        }
         __free_pages(page, order);
 }
 
 static void kimage_free_page_list(struct list_head *list)
 {
         struct list_head *pos, *next;
 
         list_for_each_safe(pos, next, list) {
                 struct page *page;
 
                 page = list_entry(pos, struct page, lru);
                 list_del(&page->lru);
 
                 kimage_free_pages(page);
         }
 }
 
-static struct page *kimage_alloc_normal_control_pages(
-        struct kimage *image, unsigned int order)
+static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
+                                                        unsigned int order)
 {
         /* Control pages are special, they are the intermediaries
          * that are needed while we copy the rest of the pages
@@ -387,6 +396,7 @@ static struct page *kimage_alloc_normal_control_pages(
          */
         do {
                 unsigned long pfn, epfn, addr, eaddr;
+
                 pages = kimage_alloc_pages(GFP_KERNEL, order);
                 if (!pages)
                         break;
@@ -395,12 +405,12 @@ static struct page *kimage_alloc_normal_control_pages(
                 addr = pfn << PAGE_SHIFT;
                 eaddr = epfn << PAGE_SHIFT;
                 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
-                        kimage_is_destination_range(image, addr, eaddr))
-                {
+                    kimage_is_destination_range(image, addr, eaddr)) {
                         list_add(&pages->lru, &extra_pages);
                         pages = NULL;
                 }
-        } while(!pages);
+        } while (!pages);
+
         if (pages) {
                 /* Remember the allocated page... */
                 list_add(&pages->lru, &image->control_pages);
@@ -420,12 +430,12 @@ static struct page *kimage_alloc_normal_control_pages(
          * For now it is simpler to just free the pages.
          */
         kimage_free_page_list(&extra_pages);
-        return pages;
+
+        return pages;
 }
 
-static struct page *kimage_alloc_crash_control_pages(
-        struct kimage *image, unsigned int order)
+static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+                                                        unsigned int order)
 {
         /* Control pages are special, they are the intermediaries
          * that are needed while we copy the rest of the pages
@@ -450,21 +460,22 @@ static struct page *kimage_alloc_crash_control_pages(
          */
         unsigned long hole_start, hole_end, size;
         struct page *pages;
+
         pages = NULL;
         size = (1 << order) << PAGE_SHIFT;
         hole_start = (image->control_page + (size - 1)) & ~(size - 1);
         hole_end = hole_start + size - 1;
-        while(hole_end <= crashk_res.end) {
+        while (hole_end <= crashk_res.end) {
                 unsigned long i;
-                if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT) {
+
+                if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
                         break;
-                }
-                if (hole_end > crashk_res.end) {
+                if (hole_end > crashk_res.end)
                         break;
-                }
                 /* See if I overlap any of the segments */
-                for(i = 0; i < image->nr_segments; i++) {
+                for (i = 0; i < image->nr_segments; i++) {
                         unsigned long mstart, mend;
+
                         mstart = image->segment[i].mem;
                         mend = mstart + image->segment[i].memsz - 1;
                         if ((hole_end >= mstart) && (hole_start <= mend)) {
@@ -480,18 +491,19 @@ static struct page *kimage_alloc_crash_control_pages(
                         break;
                 }
         }
-        if (pages) {
+        if (pages)
                 image->control_page = hole_end;
-        }
+
         return pages;
 }
 
 
-struct page *kimage_alloc_control_pages(
-        struct kimage *image, unsigned int order)
+struct page *kimage_alloc_control_pages(struct kimage *image,
+                                        unsigned int order)
 {
         struct page *pages = NULL;
-        switch(image->type) {
+
+        switch (image->type) {
         case KEXEC_TYPE_DEFAULT:
                 pages = kimage_alloc_normal_control_pages(image, order);
                 break;
@@ -499,43 +511,46 @@ struct page *kimage_alloc_control_pages(
                 pages = kimage_alloc_crash_control_pages(image, order);
                 break;
         }
+
         return pages;
 }
 
 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 {
-        if (*image->entry != 0) {
+        if (*image->entry != 0)
                 image->entry++;
-        }
+
         if (image->entry == image->last_entry) {
                 kimage_entry_t *ind_page;
                 struct page *page;
 
                 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
-                if (!page) {
+                if (!page)
                         return -ENOMEM;
-                }
+
                 ind_page = page_address(page);
                 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
                 image->entry = ind_page;
-                image->last_entry =
-                        ind_page + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
+                image->last_entry = ind_page +
+                                ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
         }
         *image->entry = entry;
         image->entry++;
         *image->entry = 0;
+
         return 0;
 }
 
-static int kimage_set_destination(
-        struct kimage *image, unsigned long destination)
+static int kimage_set_destination(struct kimage *image,
+                                   unsigned long destination)
 {
         int result;
 
         destination &= PAGE_MASK;
         result = kimage_add_entry(image, destination | IND_DESTINATION);
-        if (result == 0) {
+        if (result == 0)
                 image->destination = destination;
-        }
+
         return result;
 }
@@ -546,9 +561,9 @@ static int kimage_add_page(struct kimage *image, unsigned long page)
 
         page &= PAGE_MASK;
         result = kimage_add_entry(image, page | IND_SOURCE);
-        if (result == 0) {
+        if (result == 0)
                 image->destination += PAGE_SIZE;
-        }
+
         return result;
 }
 
@@ -564,10 +579,11 @@ static void kimage_free_extra_pages(struct kimage *image)
 }
 static int kimage_terminate(struct kimage *image)
 {
-        if (*image->entry != 0) {
+        if (*image->entry != 0)
                 image->entry++;
-        }
+
         *image->entry = IND_DONE;
+
         return 0;
 }
 
@@ -591,26 +607,24 @@ static void kimage_free(struct kimage *image)
 
         if (!image)
                 return;
+
         kimage_free_extra_pages(image);
         for_each_kimage_entry(image, ptr, entry) {
                 if (entry & IND_INDIRECTION) {
                         /* Free the previous indirection page */
-                        if (ind & IND_INDIRECTION) {
+                        if (ind & IND_INDIRECTION)
                                 kimage_free_entry(ind);
-                        }
                         /* Save this indirection page until we are
                          * done with it.
                          */
                         ind = entry;
                 }
-                else if (entry & IND_SOURCE) {
+                else if (entry & IND_SOURCE)
                         kimage_free_entry(entry);
-                }
         }
         /* Free the final indirection page */
-        if (ind & IND_INDIRECTION) {
+        if (ind & IND_INDIRECTION)
                 kimage_free_entry(ind);
-        }
+
         /* Handle any machine specific cleanup */
         machine_kexec_cleanup(image);
@@ -620,26 +634,28 @@ static void kimage_free(struct kimage *image)
         kfree(image);
 }
 
-static kimage_entry_t *kimage_dst_used(struct kimage *image, unsigned long page)
+static kimage_entry_t *kimage_dst_used(struct kimage *image,
+                                        unsigned long page)
 {
         kimage_entry_t *ptr, entry;
         unsigned long destination = 0;
 
         for_each_kimage_entry(image, ptr, entry) {
-                if (entry & IND_DESTINATION) {
+                if (entry & IND_DESTINATION)
                         destination = entry & PAGE_MASK;
-                }
                 else if (entry & IND_SOURCE) {
-                        if (page == destination) {
+                        if (page == destination)
                                 return ptr;
-                        }
+
                         destination += PAGE_SIZE;
                 }
         }
 
         return 0;
 }
 
-static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mask, unsigned long destination)
+static struct page *kimage_alloc_page(struct kimage *image,
+                                        unsigned int gfp_mask,
+                                        unsigned long destination)
 {
         /*
          * Here we implement safeguards to ensure that a source page
@@ -679,11 +695,11 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas
 
                 /* Allocate a page, if we run out of memory give up */
                 page = kimage_alloc_pages(gfp_mask, 0);
-                if (!page) {
+                if (!page)
                         return 0;
-                }
                 /* If the page cannot be used file it away */
-                if (page_to_pfn(page) > (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+                if (page_to_pfn(page) >
+                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                         list_add(&page->lru, &image->unuseable_pages);
                         continue;
                 }
@@ -694,7 +710,8 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas
                         break;
 
                 /* If the page is not a destination page use it */
-                if (!kimage_is_destination_range(image, addr, addr + PAGE_SIZE))
+                if (!kimage_is_destination_range(image, addr,
+                                                  addr + PAGE_SIZE))
                         break;
 
                 /*
@@ -727,11 +744,12 @@ static struct page *kimage_alloc_page(struct kimage *image, unsigned int gfp_mas
                         list_add(&page->lru, &image->dest_pages);
                 }
         }
+
         return page;
 }
 
 static int kimage_load_normal_segment(struct kimage *image,
-        struct kexec_segment *segment)
+                                        struct kexec_segment *segment)
 {
         unsigned long maddr;
         unsigned long ubytes, mbytes;
@@ -745,34 +763,36 @@ static int kimage_load_normal_segment(struct kimage *image,
         maddr = segment->mem;
 
         result = kimage_set_destination(image, maddr);
-        if (result < 0) {
+        if (result < 0)
                 goto out;
-        }
-        while(mbytes) {
+
+        while (mbytes) {
                 struct page *page;
                 char *ptr;
                 size_t uchunk, mchunk;
+
                 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                 if (page == 0) {
                         result = -ENOMEM;
                         goto out;
                 }
-                result = kimage_add_page(image, page_to_pfn(page) << PAGE_SHIFT);
-                if (result < 0) {
+                result = kimage_add_page(image, page_to_pfn(page)
+                                                                << PAGE_SHIFT);
+                if (result < 0)
                         goto out;
-                }
+
                 ptr = kmap(page);
                 /* Start with a clear page */
                 memset(ptr, 0, PAGE_SIZE);
                 ptr += maddr & ~PAGE_MASK;
                 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
-                if (mchunk > mbytes) {
+                if (mchunk > mbytes)
                         mchunk = mbytes;
-                }
+
                 uchunk = mchunk;
-                if (uchunk > ubytes) {
+                if (uchunk > ubytes)
                         uchunk = ubytes;
-                }
+
                 result = copy_from_user(ptr, buf, uchunk);
                 kunmap(page);
                 if (result) {
@@ -784,12 +804,12 @@ static int kimage_load_normal_segment(struct kimage *image,
                 buf += mchunk;
                 mbytes -= mchunk;
         }
- out:
+out:
         return result;
 }
 
 static int kimage_load_crash_segment(struct kimage *image,
-        struct kexec_segment *segment)
+                                        struct kexec_segment *segment)
 {
         /* For crash dumps kernels we simply copy the data from
          * user space to it's destination.
@@ -805,10 +825,11 @@ static int kimage_load_crash_segment(struct kimage *image,
         ubytes = segment->bufsz;
         mbytes = segment->memsz;
         maddr = segment->mem;
-        while(mbytes) {
+        while (mbytes) {
                 struct page *page;
                 char *ptr;
                 size_t uchunk, mchunk;
+
                 page = pfn_to_page(maddr >> PAGE_SHIFT);
                 if (page == 0) {
                         result = -ENOMEM;
@@ -817,9 +838,9 @@ static int kimage_load_crash_segment(struct kimage *image,
                 ptr = kmap(page);
                 ptr += maddr & ~PAGE_MASK;
                 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
-                if (mchunk > mbytes) {
+                if (mchunk > mbytes)
                         mchunk = mbytes;
-                }
+
                 uchunk = mchunk;
                 if (uchunk > ubytes) {
                         uchunk = ubytes;
@@ -837,15 +858,16 @@ static int kimage_load_crash_segment(struct kimage *image,
                 buf += mchunk;
                 mbytes -= mchunk;
         }
- out:
+out:
         return result;
 }
 
 static int kimage_load_segment(struct kimage *image,
-        struct kexec_segment *segment)
+                                struct kexec_segment *segment)
 {
         int result = -ENOMEM;
-        switch(image->type) {
+
+        switch (image->type) {
         case KEXEC_TYPE_DEFAULT:
                 result = kimage_load_normal_segment(image, segment);
                 break;
@@ -853,6 +875,7 @@ static int kimage_load_segment(struct kimage *image,
                 result = kimage_load_crash_segment(image, segment);
                 break;
         }
+
         return result;
 }
 
@@ -885,9 +908,9 @@ static struct kimage *kexec_crash_image = NULL;
  */
 static int kexec_lock = 0;
 
-asmlinkage long sys_kexec_load(unsigned long entry,
-        unsigned long nr_segments, struct kexec_segment __user *segments,
-        unsigned long flags)
+asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
+                                struct kexec_segment __user *segments,
+                                unsigned long flags)
 {
         struct kimage **dest_image, *image;
         int locked;
@@ -907,9 +930,7 @@ asmlinkage long sys_kexec_load(unsigned long entry,
         /* Verify we are on the appropriate architecture */
         if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
                 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
-        {
                 return -EINVAL;
-        }
 
         /* Put an artificial cap on the number
          * of segments passed to kexec_load.
@@ -929,58 +950,59 @@ asmlinkage long sys_kexec_load(unsigned long entry,
          * KISS: always take the mutex.
          */
         locked = xchg(&kexec_lock, 1);
-        if (locked) {
+        if (locked)
                 return -EBUSY;
-        }
+
         dest_image = &kexec_image;
-        if (flags & KEXEC_ON_CRASH) {
+        if (flags & KEXEC_ON_CRASH)
                 dest_image = &kexec_crash_image;
-        }
         if (nr_segments > 0) {
                 unsigned long i;
 
                 /* Loading another kernel to reboot into */
-                if ((flags & KEXEC_ON_CRASH) == 0) {
-                        result = kimage_normal_alloc(&image, entry, nr_segments, segments);
-                }
+                if ((flags & KEXEC_ON_CRASH) == 0)
+                        result = kimage_normal_alloc(&image, entry,
+                                                        nr_segments, segments);
                 /* Loading another kernel to switch to if this one crashes */
                 else if (flags & KEXEC_ON_CRASH) {
                         /* Free any current crash dump kernel before
                          * we corrupt it.
                          */
                         kimage_free(xchg(&kexec_crash_image, NULL));
-                        result = kimage_crash_alloc(&image, entry, nr_segments, segments);
+                        result = kimage_crash_alloc(&image, entry,
+                                                        nr_segments, segments);
                 }
-                if (result) {
+                if (result)
                         goto out;
-                }
+
                 result = machine_kexec_prepare(image);
-                if (result) {
+                if (result)
                         goto out;
-                }
-                for(i = 0; i < nr_segments; i++) {
+
+                for (i = 0; i < nr_segments; i++) {
                         result = kimage_load_segment(image, &image->segment[i]);
-                        if (result) {
+                        if (result)
                                 goto out;
-                        }
                 }
                 result = kimage_terminate(image);
-                if (result) {
+                if (result)
                         goto out;
-                }
         }
         /* Install the new kernel, and Uninstall the old */
         image = xchg(dest_image, image);
 
- out:
+out:
         xchg(&kexec_lock, 0); /* Release the mutex */
         kimage_free(image);
 
         return result;
 }
 
 #ifdef CONFIG_COMPAT
 asmlinkage long compat_sys_kexec_load(unsigned long entry,
-        unsigned long nr_segments, struct compat_kexec_segment __user *segments,
-        unsigned long flags)
+                                unsigned long nr_segments,
+                                struct compat_kexec_segment __user *segments,
+                                unsigned long flags)
 {
         struct compat_kexec_segment in;
         struct kexec_segment out, __user *ksegments;
@@ -989,20 +1011,17 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
         /* Don't allow clients that don't understand the native
          * architecture to do anything.
          */
-        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT) {
+        if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
                 return -EINVAL;
-        }
 
-        if (nr_segments > KEXEC_SEGMENT_MAX) {
+        if (nr_segments > KEXEC_SEGMENT_MAX)
                 return -EINVAL;
-        }
 
         ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
         for (i=0; i < nr_segments; i++) {
                 result = copy_from_user(&in, &segments[i], sizeof(in));
-                if (result) {
+                if (result)
                         return -EFAULT;
-                }
 
                 out.buf = compat_ptr(in.buf);
                 out.bufsz = in.bufsz;
@@ -1010,9 +1029,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
                 out.memsz = in.memsz;
 
                 result = copy_to_user(&ksegments[i], &out, sizeof(out));
-                if (result) {
+                if (result)
                         return -EFAULT;
-                }
         }
 
         return sys_kexec_load(entry, nr_segments, ksegments, flags);