Re: [Xen-devel] kexec trouble



  Hi,

> Function pointers sound like the right way to go! Happy hacking!

Here is a first step of the cleanup, moving to function pointers.
Compile tested only.

The first three attachments replace the patches with identical names in
patches/linux-2.6; the last one should be applied to the sparse tree.
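
For illustration, a minimal standalone sketch of the function-pointer hook
pattern these patches introduce: generic code calls through a kexec_* pointer
that defaults to the native implementation, and the Xen dom0 setup code swaps
it out at runtime. The struct page stand-in and the fake mfn offset below are
simplified for the example only and are not the kernel definitions.

#include <stdio.h>

struct page { unsigned long pfn; };     /* stand-in for the kernel's struct page */

/* native default: pseudo-physical frame number as-is */
static unsigned long default_page_to_pfn(struct page *page)
{
        return page->pfn;
}

/* Xen dom0 override: translate to a machine frame number (fake offset here) */
static unsigned long xen0_page_to_pfn(struct page *page)
{
        return page->pfn + 0x100000;
}

/* the hook that the generic kexec code calls through */
static unsigned long (*kexec_page_to_pfn)(struct page *page)
        = default_page_to_pfn;

static void xen0_set_hooks(void)
{
        kexec_page_to_pfn = xen0_page_to_pfn;
}

int main(void)
{
        struct page p = { .pfn = 42 };

        printf("native: %lu\n", kexec_page_to_pfn(&p));   /* prints 42 */
        xen0_set_hooks();
        printf("xen0:   %lu\n", kexec_page_to_pfn(&p));   /* prints 42 + fake offset */
        return 0;
}

The real patches apply the same idea to page_to_pfn/pfn_to_page,
virt_to_phys/phys_to_virt and the machine_kexec_* entry points.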

cheers,
  Gerd

-- 
Gerd Hoffmann <kraxel@xxxxxxx>
---
 include/linux/kexec.h |   22 +++++++++++-
 kernel/kexec.c        |   85 ++++++++++++++++++++++++++++++++++++++++----------
 2 files changed, 89 insertions(+), 18 deletions(-)

Index: kexec-2.6.16/include/linux/kexec.h
===================================================================
--- kexec-2.6.16.orig/include/linux/kexec.h
+++ kexec-2.6.16/include/linux/kexec.h
@@ -85,12 +85,30 @@ struct kimage {
 #define KEXEC_TYPE_CRASH   1
 };
 
-
-
 /* kexec interface functions */
+extern unsigned long (*kexec_page_to_pfn)(struct page *page);
+extern struct page* (*kexec_pfn_to_page)(unsigned long pfn);
+extern unsigned long (*kexec_virt_to_phys)(void *addr);
+extern void* (*kexec_phys_to_virt)(unsigned long addr);
+
+#ifdef KEXEC_ARCH_USES_HOOKS
+extern NORET_TYPE void (*machine_kexec)(struct kimage *image) ATTRIB_NORET;
+extern int (*machine_kexec_prepare)(struct kimage *image);
+extern int (*machine_kexec_load)(struct kimage *image);
+extern void (*machine_kexec_unload)(struct kimage *image);
+extern void (*machine_kexec_cleanup)(struct kimage *image);
+#else
 extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
 extern int machine_kexec_prepare(struct kimage *image);
+static inline int machine_kexec_load(struct kimage *image) { return 0; }
+static inline void machine_kexec_unload(struct kimage *image) { }
 extern void machine_kexec_cleanup(struct kimage *image);
+#endif
+
+#ifdef CONFIG_XEN
+extern void xen_machine_kexec_setup_resources(void);
+extern void xen_machine_kexec_register_resources(struct resource *res);
+#endif
 extern asmlinkage long sys_kexec_load(unsigned long entry,
                                        unsigned long nr_segments,
                                        struct kexec_segment __user *segments,
Index: kexec-2.6.16/kernel/kexec.c
===================================================================
--- kexec-2.6.16.orig/kernel/kexec.c
+++ kexec-2.6.16/kernel/kexec.c
@@ -27,6 +27,31 @@
 #include <asm/system.h>
 #include <asm/semaphore.h>
 
+static unsigned long default_page_to_pfn(struct page *page)
+{
+       return page_to_pfn(page);
+}
+
+static struct page* default_pfn_to_page(unsigned long pfn)
+{
+       return pfn_to_page(pfn);
+}
+
+static unsigned long default_virt_to_phys(void *addr)
+{
+       return virt_to_phys(addr);
+}
+
+static void* default_phys_to_virt(unsigned long addr)
+{
+       return phys_to_virt(addr);
+}
+
+unsigned long (*kexec_page_to_pfn)(struct page *page) = default_page_to_pfn;
+struct page* (*kexec_pfn_to_page)(unsigned long pfn)  = default_pfn_to_page;
+unsigned long (*kexec_virt_to_phys)(void *addr) = default_virt_to_phys;
+void* (*kexec_phys_to_virt)(unsigned long addr) = default_phys_to_virt;
+
 /* Per cpu memory for storing cpu states in case of system crash. */
 note_buf_t* crash_notes;
 
@@ -403,7 +428,7 @@ static struct page *kimage_alloc_normal_
                pages = kimage_alloc_pages(GFP_KERNEL, order);
                if (!pages)
                        break;
-               pfn   = page_to_pfn(pages);
+               pfn   = kexec_page_to_pfn(pages);
                epfn  = pfn + count;
                addr  = pfn << PAGE_SHIFT;
                eaddr = epfn << PAGE_SHIFT;
@@ -437,6 +462,7 @@ static struct page *kimage_alloc_normal_
        return pages;
 }
 
+#ifndef CONFIG_XEN
 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                      unsigned int order)
 {
@@ -490,7 +516,7 @@ static struct page *kimage_alloc_crash_c
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
-                       pages = pfn_to_page(hole_start >> PAGE_SHIFT);
+                       pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
                        break;
                }
        }
@@ -517,6 +543,13 @@ struct page *kimage_alloc_control_pages(
 
        return pages;
 }
+#else /* !CONFIG_XEN */
+struct page *kimage_alloc_control_pages(struct kimage *image,
+                                        unsigned int order)
+{
+       return kimage_alloc_normal_control_pages(image, order);
+}
+#endif
 
 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 {
@@ -532,7 +565,7 @@ static int kimage_add_entry(struct kimag
                        return -ENOMEM;
 
                ind_page = page_address(page);
-               *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
+               *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
@@ -593,13 +626,13 @@ static int kimage_terminate(struct kimag
 #define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION)? \
-                       phys_to_virt((entry & PAGE_MASK)): ptr +1)
+                       kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
 
 static void kimage_free_entry(kimage_entry_t entry)
 {
        struct page *page;
 
-       page = pfn_to_page(entry >> PAGE_SHIFT);
+       page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
 }
 
@@ -611,6 +644,9 @@ static void kimage_free(struct kimage *i
        if (!image)
                return;
 
+       if (machine_kexec_unload)
+               machine_kexec_unload(image);
+
        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
@@ -630,7 +666,8 @@ static void kimage_free(struct kimage *i
                kimage_free_entry(ind);
 
        /* Handle any machine specific cleanup */
-       machine_kexec_cleanup(image);
+       if (machine_kexec_cleanup)
+               machine_kexec_cleanup(image);
 
        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);
@@ -686,7 +723,7 @@ static struct page *kimage_alloc_page(st
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
@@ -701,12 +738,12 @@ static struct page *kimage_alloc_page(st
                if (!page)
                        return NULL;
                /* If the page cannot be used file it away */
-               if (page_to_pfn(page) >
+               if (kexec_page_to_pfn(page) >
                                (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unuseable_pages);
                        continue;
                }
-               addr = page_to_pfn(page) << PAGE_SHIFT;
+               addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
 
                /* If it is the destination page we want use it */
                if (addr == destination)
@@ -729,7 +766,7 @@ static struct page *kimage_alloc_page(st
                        struct page *old_page;
 
                        old_addr = *old & PAGE_MASK;
-                       old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+                       old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);
 
@@ -779,7 +816,7 @@ static int kimage_load_normal_segment(st
                        result  = -ENOMEM;
                        goto out;
                }
-               result = kimage_add_page(image, page_to_pfn(page)
+               result = kimage_add_page(image, kexec_page_to_pfn(page)
                                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;
@@ -811,6 +848,7 @@ out:
        return result;
 }
 
+#ifndef CONFIG_XEN
 static int kimage_load_crash_segment(struct kimage *image,
                                        struct kexec_segment *segment)
 {
@@ -833,7 +871,7 @@ static int kimage_load_crash_segment(str
                char *ptr;
                size_t uchunk, mchunk;
 
-               page = pfn_to_page(maddr >> PAGE_SHIFT);
+               page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
                if (page == 0) {
                        result  = -ENOMEM;
                        goto out;
@@ -881,6 +919,13 @@ static int kimage_load_segment(struct ki
 
        return result;
 }
+#else /* CONFIG_XEN */
+static int kimage_load_segment(struct kimage *image,
+                               struct kexec_segment *segment)
+{
+       return kimage_load_normal_segment(image, segment);
+}
+#endif
 
 /*
  * Exec Kernel system call: for obvious reasons only root may call it.
@@ -978,9 +1023,11 @@ asmlinkage long sys_kexec_load(unsigned 
                if (result)
                        goto out;
 
-               result = machine_kexec_prepare(image);
-               if (result)
-                       goto out;
+               if (machine_kexec_prepare) {
+                       result = machine_kexec_prepare(image);
+                       if (result)
+                               goto out;
+               }
 
                for (i = 0; i < nr_segments; i++) {
                        result = kimage_load_segment(image, &image->segment[i]);
@@ -991,6 +1038,13 @@ asmlinkage long sys_kexec_load(unsigned 
                if (result)
                        goto out;
        }
+
+       if (machine_kexec_load) {
+               result = machine_kexec_load(image);
+               if (result)
+                       goto out;
+       }
+
        /* Install the new kernel, and  Uninstall the old */
        image = xchg(dest_image, image);
 
@@ -1045,7 +1099,6 @@ void crash_kexec(struct pt_regs *regs)
        struct kimage *image;
        int locked;
 
-
        /* Take the kexec_lock here to prevent sys_kexec_load
         * running on one cpu from replacing the crash kernel
         * we are using after a panic on a different cpu.
---
 arch/i386/kernel/crash.c         |    4 ++
 arch/i386/kernel/machine_kexec.c |   65 +++++++++++++++++++++++++--------------
 include/asm-i386/kexec.h         |    3 +
 3 files changed, 49 insertions(+), 23 deletions(-)

Index: kexec-2.6.16/arch/i386/kernel/crash.c
===================================================================
--- kexec-2.6.16.orig/arch/i386/kernel/crash.c
+++ kexec-2.6.16/arch/i386/kernel/crash.c
@@ -90,6 +90,7 @@ static void crash_save_self(struct pt_re
        crash_save_this_cpu(regs, cpu);
 }
 
+#ifndef CONFIG_XEN
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -158,6 +159,7 @@ static void nmi_shootdown_cpus(void)
        /* There are no cpus to shootdown */
 }
 #endif
+#endif /* CONFIG_XEN */
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
@@ -174,10 +176,12 @@ void machine_crash_shutdown(struct pt_re
 
        /* Make a note of crashing cpu. Will be used in NMI callback.*/
        crashing_cpu = smp_processor_id();
+#ifndef CONFIG_XEN
        nmi_shootdown_cpus();
        lapic_shutdown();
 #if defined(CONFIG_X86_IO_APIC)
        disable_IO_APIC();
 #endif
+#endif /* CONFIG_XEN */
        crash_save_self(regs);
 }
Index: kexec-2.6.16/arch/i386/kernel/machine_kexec.c
===================================================================
--- kexec-2.6.16.orig/arch/i386/kernel/machine_kexec.c
+++ kexec-2.6.16/arch/i386/kernel/machine_kexec.c
@@ -19,6 +19,10 @@
 #include <asm/desc.h>
 #include <asm/system.h>
 
+#ifdef CONFIG_XEN
+#include <xen/interface/kexec.h>
+#endif
+
 #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
 static u32 kexec_pgd[1024] PAGE_ALIGNED;
 #ifdef CONFIG_X86_PAE
@@ -28,37 +32,45 @@ static u32 kexec_pmd1[1024] PAGE_ALIGNED
 static u32 kexec_pte0[1024] PAGE_ALIGNED;
 static u32 kexec_pte1[1024] PAGE_ALIGNED;
 
-/*
- * A architecture hook called to validate the
- * proposed image and prepare the control pages
- * as needed.  The pages for KEXEC_CONTROL_CODE_SIZE
- * have been allocated, but the segments have yet
- * been copied into the kernel.
- *
- * Do what every setup is needed on image and the
- * reboot code buffer to allow us to avoid allocations
- * later.
- *
- * Currently nothing.
- */
-int machine_kexec_prepare(struct kimage *image)
-{
-       return 0;
-}
+#ifdef CONFIG_XEN
 
-/*
- * Undo anything leftover by machine_kexec_prepare
- * when an image is freed.
- */
-void machine_kexec_cleanup(struct kimage *image)
+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
+
+#if PAGES_NR > KEXEC_XEN_NO_PAGES
+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
+#endif
+
+#if PA_CONTROL_PAGE != 0
+#error PA_CONTROL_PAGE is non zero - Xen support will break
+#endif
+
+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
 {
+       void *control_page;
+
+       memset(xki->page_list, 0, sizeof(xki->page_list));
+
+       control_page = page_address(image->control_code_page);
+       memcpy(control_page, relocate_kernel, PAGE_SIZE);
+
+       xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
+       xki->page_list[PA_PGD] = __ma(kexec_pgd);
+#ifdef CONFIG_X86_PAE
+       xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
+       xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
+#endif
+       xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
+       xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
+
 }
 
+#endif /* CONFIG_XEN */
+
 /*
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-NORET_TYPE void machine_kexec(struct kimage *image)
+static NORET_TYPE ATTRIB_NORET void native_machine_kexec(struct kimage *image)
 {
        unsigned long page_list[PAGES_NR];
        void *control_page;
@@ -87,3 +99,10 @@ NORET_TYPE void machine_kexec(struct kim
        relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
                        image->start, cpu_has_pae);
 }
+
+NORET_TYPE void (*machine_kexec)(struct kimage *image) ATTRIB_NORET
+       = native_machine_kexec;
+int (*machine_kexec_prepare)(struct kimage *image)  = NULL;
+int (*machine_kexec_load)(struct kimage *image)     = NULL;
+void (*machine_kexec_unload)(struct kimage *image)  = NULL;
+void (*machine_kexec_cleanup)(struct kimage *image) = NULL;
Index: kexec-2.6.16/include/asm-i386/kexec.h
===================================================================
--- kexec-2.6.16.orig/include/asm-i386/kexec.h
+++ kexec-2.6.16/include/asm-i386/kexec.h
@@ -98,6 +98,9 @@ relocate_kernel(unsigned long indirectio
                unsigned long start_address,
                unsigned int has_pae) ATTRIB_NORET;
 
+
+#define KEXEC_ARCH_USES_HOOKS 1
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _I386_KEXEC_H */
---
 arch/x86_64/kernel/crash.c         |    6 +
 arch/x86_64/kernel/machine_kexec.c |  133 +++++++++++++++++++++++++++++++++----
 include/asm-x86_64/kexec.h         |    7 +
 3 files changed, 132 insertions(+), 14 deletions(-)

Index: kexec-2.6.16/arch/x86_64/kernel/crash.c
===================================================================
--- kexec-2.6.16.orig/arch/x86_64/kernel/crash.c
+++ kexec-2.6.16/arch/x86_64/kernel/crash.c
@@ -92,6 +92,7 @@ static void crash_save_self(struct pt_re
        crash_save_this_cpu(regs, cpu);
 }
 
+#ifndef CONFIG_XEN
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -156,6 +157,7 @@ static void nmi_shootdown_cpus(void)
        /* There are no cpus to shootdown */
 }
 #endif
+#endif /* CONFIG_XEN */
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
@@ -173,6 +175,8 @@ void machine_crash_shutdown(struct pt_re
 
        /* Make a note of crashing cpu. Will be used in NMI callback.*/
        crashing_cpu = smp_processor_id();
+
+#ifndef CONFIG_XEN
        nmi_shootdown_cpus();
 
        if(cpu_has_apic)
@@ -181,6 +185,6 @@ void machine_crash_shutdown(struct pt_re
 #if defined(CONFIG_X86_IO_APIC)
        disable_IO_APIC();
 #endif
-
+#endif /* CONFIG_XEN */
        crash_save_self(regs);
 }
Index: kexec-2.6.16/arch/x86_64/kernel/machine_kexec.c
===================================================================
--- kexec-2.6.16.orig/arch/x86_64/kernel/machine_kexec.c
+++ kexec-2.6.16/arch/x86_64/kernel/machine_kexec.c
@@ -24,6 +24,104 @@ static u64 kexec_pud1[512] PAGE_ALIGNED;
 static u64 kexec_pmd1[512] PAGE_ALIGNED;
 static u64 kexec_pte1[512] PAGE_ALIGNED;
 
+#ifdef CONFIG_XEN
+
+/* In the case of Xen, override hypervisor functions to be able to create
+ * a regular identity mapping page table...
+ */
+
+#include <xen/interface/kexec.h>
+#include <xen/interface/memory.h>
+
+#define x__pmd(x) ((pmd_t) { (x) } )
+#define x__pud(x) ((pud_t) { (x) } )
+#define x__pgd(x) ((pgd_t) { (x) } )
+
+#define x_pmd_val(x)   ((x).pmd)
+#define x_pud_val(x)   ((x).pud)
+#define x_pgd_val(x)   ((x).pgd)
+
+static inline void x_set_pmd(pmd_t *dst, pmd_t val)
+{
+       x_pmd_val(*dst) = x_pmd_val(val);
+}
+
+static inline void x_set_pud(pud_t *dst, pud_t val)
+{
+       x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
+}
+
+static inline void x_pud_clear (pud_t *pud)
+{
+       x_pud_val(*pud) = 0;
+}
+
+static inline void x_set_pgd(pgd_t *dst, pgd_t val)
+{
+       x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
+}
+
+static inline void x_pgd_clear (pgd_t * pgd)
+{
+       x_pgd_val(*pgd) = 0;
+}
+
+#define X__PAGE_KERNEL_LARGE_EXEC \
+         _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
+#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
+
+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
+
+#if PAGES_NR > KEXEC_XEN_NO_PAGES
+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
+#endif
+
+#if PA_CONTROL_PAGE != 0
+#error PA_CONTROL_PAGE is non zero - Xen support will break
+#endif
+
+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
+{
+       void *control_page;
+       void *table_page;
+
+       memset(xki->page_list, 0, sizeof(xki->page_list));
+
+       control_page = page_address(image->control_code_page) + PAGE_SIZE;
+       memcpy(control_page, relocate_kernel, PAGE_SIZE);
+
+       table_page = page_address(image->control_code_page);
+
+       xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
+       xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
+
+       xki->page_list[PA_PGD] = __ma(kexec_pgd);
+       xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
+       xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
+       xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
+       xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
+       xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
+       xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
+}
+
+#else /* CONFIG_XEN */
+
+#define x__pmd(x) __pmd(x)
+#define x__pud(x) __pud(x)
+#define x__pgd(x) __pgd(x)
+
+#define x_set_pmd(x, y) set_pmd(x, y)
+#define x_set_pud(x, y) set_pud(x, y)
+#define x_set_pgd(x, y) set_pgd(x, y)
+
+#define x_pud_clear(x) pud_clear(x)
+#define x_pgd_clear(x) pgd_clear(x)
+
+#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#define X_KERNPG_TABLE _KERNPG_TABLE
+
+#endif /* CONFIG_XEN */
+
 static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
        unsigned long end_addr;
@@ -31,7 +129,7 @@ static void init_level2_page(pmd_t *leve
        addr &= PAGE_MASK;
        end_addr = addr + PUD_SIZE;
        while (addr < end_addr) {
-               set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+               x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
                addr += PMD_SIZE;
        }
 }
@@ -56,12 +154,12 @@ static int init_level3_page(struct kimag
                }
                level2p = (pmd_t *)page_address(page);
                init_level2_page(level2p, addr);
-               set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
+               x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
                addr += PUD_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
-               pud_clear(level3p++);
+               x_pud_clear(level3p++);
                addr += PUD_SIZE;
        }
 out:
@@ -92,12 +190,12 @@ static int init_level4_page(struct kimag
                if (result) {
                        goto out;
                }
-               set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
+               x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
                addr += PGDIR_SIZE;
        }
        /* clear the unused entries */
        while (addr < end_addr) {
-               pgd_clear(level4p++);
+               x_pgd_clear(level4p++);
                addr += PGDIR_SIZE;
        }
 out:
@@ -108,11 +206,17 @@ out:
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
        pgd_t *level4p;
+       unsigned long x_end_pfn = end_pfn;
+
+#ifdef CONFIG_XEN
+       x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
+#endif
+
        level4p = (pgd_t *)__va(start_pgtable);
-       return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
+       return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
 }
 
-int machine_kexec_prepare(struct kimage *image)
+static int native_machine_kexec_prepare(struct kimage *image)
 {
        unsigned long start_pgtable;
        int result;
@@ -128,16 +232,11 @@ int machine_kexec_prepare(struct kimage 
        return 0;
 }
 
-void machine_kexec_cleanup(struct kimage *image)
-{
-       return;
-}
-
 /*
  * Do not allocate memory (or fail in any way) in machine_kexec().
  * We are past the point of no return, committed to rebooting now.
  */
-NORET_TYPE void machine_kexec(struct kimage *image)
+static NORET_TYPE ATTRIB_NORET void native_machine_kexec(struct kimage *image)
 {
        unsigned long page_list[PAGES_NR];
        void *control_page;
@@ -171,3 +270,11 @@ NORET_TYPE void machine_kexec(struct kim
        relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
                        image->start);
 }
+
+NORET_TYPE void (*machine_kexec)(struct kimage *image) ATTRIB_NORET
+       = native_machine_kexec;
+int (*machine_kexec_prepare)(struct kimage *image)
+       = native_machine_kexec_prepare;
+int (*machine_kexec_load)(struct kimage *image)     = NULL;
+void (*machine_kexec_unload)(struct kimage *image)  = NULL;
+void (*machine_kexec_cleanup)(struct kimage *image) = NULL;
Index: kexec-2.6.16/include/asm-x86_64/kexec.h
===================================================================
--- kexec-2.6.16.orig/include/asm-x86_64/kexec.h
+++ kexec-2.6.16/include/asm-x86_64/kexec.h
@@ -91,6 +91,13 @@ relocate_kernel(unsigned long indirectio
                unsigned long page_list,
                unsigned long start_address) ATTRIB_NORET;
 
+/* Under Xen we need to work with machine addresses. These macros give the
+ * machine address of a certain page to the generic kexec code instead of
+ * the pseudo physical address which would be given by the default macros.
+ */
+
+#define KEXEC_ARCH_USES_HOOKS 1
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _X86_64_KEXEC_H */
---
 drivers/xen/core/machine_kexec.c |   42 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 3 deletions(-)

Index: kexec-2.6.16/drivers/xen/core/machine_kexec.c
===================================================================
--- kexec-2.6.16.orig/drivers/xen/core/machine_kexec.c
+++ kexec-2.6.16/drivers/xen/core/machine_kexec.c
@@ -11,6 +11,7 @@
 
 extern void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, 
                                         struct kimage *image);
+static void xen0_set_hooks(void);
 
 int xen_max_nr_phys_cpus;
 struct resource xen_hypervisor_res;
@@ -24,6 +25,7 @@ void xen_machine_kexec_setup_resources(v
 
        if (!is_initial_xendomain())
                return;
+       xen0_set_hooks();
 
        /* determine maximum number of physical cpus */
 
@@ -124,7 +126,7 @@ static void setup_load_arg(xen_kexec_ima
  * is currently called too early. It might make sense
  * to move prepare, but for now, just add an extra hook.
  */
-int xen_machine_kexec_load(struct kimage *image)
+static int xen0_machine_kexec_load(struct kimage *image)
 {
        xen_kexec_load_t xkl;
 
@@ -140,7 +142,7 @@ int xen_machine_kexec_load(struct kimage
  * is called too late, and its possible xen could try and kdump
  * using resources that have been freed.
  */
-void xen_machine_kexec_unload(struct kimage *image)
+static void xen0_machine_kexec_unload(struct kimage *image)
 {
        xen_kexec_load_t xkl;
 
@@ -157,7 +159,7 @@ void xen_machine_kexec_unload(struct kim
  * stop all CPUs and kexec. That is it combines machine_shutdown()
  * and machine_kexec() in Linux kexec terms.
  */
-NORET_TYPE void xen_machine_kexec(struct kimage *image)
+static NORET_TYPE void xen0_machine_kexec(struct kimage *image)
 {
        xen_kexec_exec_t xke;
 
@@ -172,6 +174,40 @@ void machine_shutdown(void)
        /* do nothing */
 }
 
+static unsigned long xen0_page_to_pfn(struct page *page)
+{
+       return pfn_to_mfn(page_to_pfn(page));
+}
+
+static struct page* xen0_pfn_to_page(unsigned long pfn)
+{
+       return pfn_to_page(mfn_to_pfn(pfn));
+}
+
+static unsigned long xen0_virt_to_phys(void *addr)
+{
+       return virt_to_machine(addr);
+}
+
+static void* xen0_phys_to_virt(unsigned long addr)
+{
+       return phys_to_virt(machine_to_phys(addr));
+}
+
+
+static void xen0_set_hooks(void)
+{
+       kexec_page_to_pfn  = xen0_page_to_pfn;
+       kexec_pfn_to_page  = xen0_pfn_to_page;
+       kexec_virt_to_phys = xen0_virt_to_phys;
+       kexec_phys_to_virt = xen0_phys_to_virt;
+
+       machine_kexec_load   = xen0_machine_kexec_load;
+       machine_kexec_unload = xen0_machine_kexec_unload;
+       machine_kexec        = xen0_machine_kexec;
+
+       printk("%s: kexec hook setup done\n", __FUNCTION__);
+}
 
 /*
  * Local variables:
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel