[Xen-changelog] [xen-unstable] [IA64] lindent dom_fw_foo code



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1180107181 21600
# Node ID 0e5c0cde23b10a268022bbebe3b22fdf9e9fdcc7
# Parent  034f3e20ad10cc04935367cf0c8e2d99959b1518
[IA64] lindent dom_fw_foo code

Lindent the dom_fw_<foo> code to revert formatting that was done as
part of splitting up the files.
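
For reference, the change is purely mechanical: Lindent converts the
4-space, spaces-only layout used while the files were split back to the
kernel's tab-indented style. A minimal before/after sketch (hypothetical
function, not taken from this patch; tabs shown as 8 columns):

    /* before: 4-space indentation, spaces only, BSD-style layout */
    static int
    example_update(struct domain *d)
    {
        if (d == NULL)
            return -EINVAL;
        return 0;
    }

    /* after Lindent: tabs, 8-column indentation, Linux brace style */
    static int example_update(struct domain *d)
    {
            if (d == NULL)
                    return -EINVAL;
            return 0;
    }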

Signed-off-by: Jes Sorensen <jes@xxxxxxx>
---
 xen/arch/ia64/xen/dom_fw_dom0.c  |  652 +++++++++++++++++++--------------------
 xen/arch/ia64/xen/dom_fw_domu.c  |  316 +++++++++---------
 xen/arch/ia64/xen/dom_fw_utils.c |  353 ++++++++++-----------
 3 files changed, 661 insertions(+), 660 deletions(-)

diff -r 034f3e20ad10 -r 0e5c0cde23b1 xen/arch/ia64/xen/dom_fw_dom0.c
--- a/xen/arch/ia64/xen/dom_fw_dom0.c   Thu May 24 16:28:48 2007 -0600
+++ b/xen/arch/ia64/xen/dom_fw_dom0.c   Fri May 25 09:33:01 2007 -0600
@@ -39,354 +39,352 @@ static u32 lsapic_nbr;
 
 /* Modify lsapic table.  Provides LPs.  */
 static int __init
-acpi_update_lsapic(acpi_table_entry_header *header, const unsigned long end)
-{
-    struct acpi_table_lsapic *lsapic;
-    int enable;
-
-    lsapic = (struct acpi_table_lsapic *) header;
-    if (!lsapic)
-        return -EINVAL;
-
-    if (lsapic_nbr < MAX_VIRT_CPUS && dom0->vcpu[lsapic_nbr] != NULL)
-        enable = 1;
-    else
-        enable = 0;
-
-    if (lsapic->flags.enabled && enable) {
-        printk("enable lsapic entry: 0x%lx\n", (u64)lsapic);
-        lsapic->id = lsapic_nbr;
-        lsapic->eid = 0;
-        lsapic_nbr++;
-    } else if (lsapic->flags.enabled) {
-        printk("DISABLE lsapic entry: 0x%lx\n", (u64)lsapic);
-        lsapic->flags.enabled = 0;
-        lsapic->id = 0;
-        lsapic->eid = 0;
-    }
-    return 0;
+acpi_update_lsapic(acpi_table_entry_header * header, const unsigned long end)
+{
+       struct acpi_table_lsapic *lsapic;
+       int enable;
+
+       lsapic = (struct acpi_table_lsapic *)header;
+       if (!lsapic)
+               return -EINVAL;
+
+       if (lsapic_nbr < MAX_VIRT_CPUS && dom0->vcpu[lsapic_nbr] != NULL)
+               enable = 1;
+       else
+               enable = 0;
+
+       if (lsapic->flags.enabled && enable) {
+               printk("enable lsapic entry: 0x%lx\n", (u64) lsapic);
+               lsapic->id = lsapic_nbr;
+               lsapic->eid = 0;
+               lsapic_nbr++;
+       } else if (lsapic->flags.enabled) {
+               printk("DISABLE lsapic entry: 0x%lx\n", (u64) lsapic);
+               lsapic->flags.enabled = 0;
+               lsapic->id = 0;
+               lsapic->eid = 0;
+       }
+       return 0;
 }
 
 static int __init
-acpi_patch_plat_int_src(acpi_table_entry_header *header,
-                        const unsigned long end)
-{
-    struct acpi_table_plat_int_src *plintsrc;
-
-    plintsrc = (struct acpi_table_plat_int_src *)header;
-    if (!plintsrc)
-        return -EINVAL;
-
-    if (plintsrc->type == ACPI_INTERRUPT_CPEI) {
-        printk("ACPI_INTERRUPT_CPEI disabled for Domain0\n");
-        plintsrc->type = -1;
-    }
-    return 0;
+acpi_patch_plat_int_src(acpi_table_entry_header * header,
+                       const unsigned long end)
+{
+       struct acpi_table_plat_int_src *plintsrc;
+
+       plintsrc = (struct acpi_table_plat_int_src *)header;
+       if (!plintsrc)
+               return -EINVAL;
+
+       if (plintsrc->type == ACPI_INTERRUPT_CPEI) {
+               printk("ACPI_INTERRUPT_CPEI disabled for Domain0\n");
+               plintsrc->type = -1;
+       }
+       return 0;
 }
 
 static int __init
 acpi_update_madt_checksum(unsigned long phys_addr, unsigned long size)
 {
-    struct acpi_table_madt* acpi_madt;
-
-    if (!phys_addr || !size)
-        return -EINVAL;
-
-    acpi_madt = (struct acpi_table_madt *) __va(phys_addr);
-    acpi_madt->header.checksum = 0;
-    acpi_madt->header.checksum = generate_acpi_checksum(acpi_madt, size);
-
-    return 0;
+       struct acpi_table_madt *acpi_madt;
+
+       if (!phys_addr || !size)
+               return -EINVAL;
+
+       acpi_madt = (struct acpi_table_madt *)__va(phys_addr);
+       acpi_madt->header.checksum = 0;
+       acpi_madt->header.checksum = generate_acpi_checksum(acpi_madt, size);
+
+       return 0;
 }
 
 /* base is physical address of acpi table */
+static void __init touch_acpi_table(void)
+{
+       lsapic_nbr = 0;
+
+       if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, 0) < 0)
+               printk("Error parsing MADT - no LAPIC entries\n");
+       if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
+                                 acpi_patch_plat_int_src, 0) < 0)
+               printk("Error parsing MADT - no PLAT_INT_SRC entries\n");
+
+       acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
+
+       return;
+}
+
+void __init efi_systable_init_dom0(struct fw_tables *tables)
+{
+       int i = 1;
+
+       /* Write messages to the console.  */
+       touch_acpi_table();
+
+       printk("Domain0 EFI passthrough:");
+       if (efi.mps) {
+               tables->efi_tables[i].guid = MPS_TABLE_GUID;
+               tables->efi_tables[i].table = __pa(efi.mps);
+               printk(" MPS=0x%lx", tables->efi_tables[i].table);
+               i++;
+       }
+       if (efi.acpi20) {
+               tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
+               tables->efi_tables[i].table = __pa(efi.acpi20);
+               printk(" ACPI 2.0=0x%lx", tables->efi_tables[i].table);
+               i++;
+       }
+       if (efi.acpi) {
+               tables->efi_tables[i].guid = ACPI_TABLE_GUID;
+               tables->efi_tables[i].table = __pa(efi.acpi);
+               printk(" ACPI=0x%lx", tables->efi_tables[i].table);
+               i++;
+       }
+       if (efi.smbios) {
+               tables->efi_tables[i].guid = SMBIOS_TABLE_GUID;
+               tables->efi_tables[i].table = __pa(efi.smbios);
+               printk(" SMBIOS=0x%lx", tables->efi_tables[i].table);
+               i++;
+       }
+       if (efi.hcdp) {
+               tables->efi_tables[i].guid = HCDP_TABLE_GUID;
+               tables->efi_tables[i].table = __pa(efi.hcdp);
+               printk(" HCDP=0x%lx", tables->efi_tables[i].table);
+               i++;
+       }
+       printk("\n");
+       BUG_ON(i > NUM_EFI_SYS_TABLES);
+}
+
 static void __init
-touch_acpi_table(void)
-{
-    lsapic_nbr = 0;
-
-    if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, 0) < 0)
-        printk("Error parsing MADT - no LAPIC entries\n");
-    if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
-                              acpi_patch_plat_int_src, 0) < 0)
-        printk("Error parsing MADT - no PLAT_INT_SRC entries\n");
-
-    acpi_table_parse(ACPI_APIC, acpi_update_madt_checksum);
-
-    return;
-}
-
-void __init
-efi_systable_init_dom0(struct fw_tables *tables)
-{
-    int i = 1;
-
-    /* Write messages to the console.  */
-    touch_acpi_table();
-
-    printk("Domain0 EFI passthrough:");
-    if (efi.mps) {
-        tables->efi_tables[i].guid = MPS_TABLE_GUID;
-        tables->efi_tables[i].table = __pa(efi.mps);
-        printk(" MPS=0x%lx",tables->efi_tables[i].table);
-        i++;
-    }
-    if (efi.acpi20) {
-        tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
-        tables->efi_tables[i].table = __pa(efi.acpi20);
-        printk(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
-        i++;
-    }
-    if (efi.acpi) {
-        tables->efi_tables[i].guid = ACPI_TABLE_GUID;
-        tables->efi_tables[i].table = __pa(efi.acpi);
-        printk(" ACPI=0x%lx",tables->efi_tables[i].table);
-        i++;
-    }
-    if (efi.smbios) {
-        tables->efi_tables[i].guid = SMBIOS_TABLE_GUID;
-        tables->efi_tables[i].table = __pa(efi.smbios);
-        printk(" SMBIOS=0x%lx",tables->efi_tables[i].table);
-        i++;
-    }
-    if (efi.hcdp) {
-        tables->efi_tables[i].guid = HCDP_TABLE_GUID;
-        tables->efi_tables[i].table = __pa(efi.hcdp);
-        printk(" HCDP=0x%lx",tables->efi_tables[i].table);
-        i++;
-    }
-    printk("\n");
-    BUG_ON(i > NUM_EFI_SYS_TABLES);
-}
-
-static void __init
-setup_dom0_memmap_info(struct domain *d, struct fw_tables *tables,
-                       int *num_mds)
-{
-    int i;
-    efi_memory_desc_t *md;
-    efi_memory_desc_t *last_mem_md = NULL;
-    xen_ia64_memmap_info_t* memmap_info;
-    unsigned long paddr_start;
-    unsigned long paddr_end;
-
-    for (i = *num_mds - 1; i >= 0; i--) {
-        md = &tables->efi_memmap[i];
-        if (md->attribute == EFI_MEMORY_WB &&
-            md->type == EFI_CONVENTIONAL_MEMORY &&
-            md->num_pages > 2 * (1UL << (PAGE_SHIFT - EFI_PAGE_SHIFT))) {
-            last_mem_md = md;
-            break;
-        }
-    }
-
-    if (last_mem_md == NULL) {
-        printk("%s: warning: "
-               "no dom0 contiguous memory to hold memory map\n",
-               __func__);
-        return;
-    }
-    paddr_end = last_mem_md->phys_addr +
-        (last_mem_md->num_pages << EFI_PAGE_SHIFT);
-    paddr_start = (paddr_end - PAGE_SIZE) & PAGE_MASK;
-    last_mem_md->num_pages -=
-        (paddr_end - paddr_start) / (1UL << EFI_PAGE_SHIFT);
-
-    md = &tables->efi_memmap[*num_mds];
-    (*num_mds)++;
-    md->type = EFI_RUNTIME_SERVICES_DATA;
-    md->phys_addr = paddr_start;
-    md->virt_addr = 0;
-    md->num_pages = 1UL << (PAGE_SHIFT - EFI_PAGE_SHIFT);
-    md->attribute = EFI_MEMORY_WB;
-
-    memmap_info = domain_mpa_to_imva(d, md->phys_addr);
-    BUG_ON(*num_mds > NUM_MEM_DESCS);
-
-    memmap_info->efi_memdesc_size = sizeof(md[0]);
-    memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
-    memmap_info->efi_memmap_size = *num_mds * sizeof(md[0]);
-    memcpy(&memmap_info->memdesc, &tables->efi_memmap[0],
-           memmap_info->efi_memmap_size);
-    d->shared_info->arch.memmap_info_num_pages = 1;
-    d->shared_info->arch.memmap_info_pfn = md->phys_addr >> PAGE_SHIFT;
-
-    sort(tables->efi_memmap, *num_mds, sizeof(efi_memory_desc_t),
-         efi_mdt_cmp, NULL);
+setup_dom0_memmap_info(struct domain *d, struct fw_tables *tables, int *num_mds)
+{
+       int i;
+       efi_memory_desc_t *md;
+       efi_memory_desc_t *last_mem_md = NULL;
+       xen_ia64_memmap_info_t *memmap_info;
+       unsigned long paddr_start;
+       unsigned long paddr_end;
+
+       for (i = *num_mds - 1; i >= 0; i--) {
+               md = &tables->efi_memmap[i];
+               if (md->attribute == EFI_MEMORY_WB &&
+                   md->type == EFI_CONVENTIONAL_MEMORY &&
+                   md->num_pages >
+                   2 * (1UL << (PAGE_SHIFT - EFI_PAGE_SHIFT))) {
+                       last_mem_md = md;
+                       break;
+               }
+       }
+
+       if (last_mem_md == NULL) {
+               printk("%s: warning: "
+                      "no dom0 contiguous memory to hold memory map\n",
+                      __func__);
+               return;
+       }
+       paddr_end = last_mem_md->phys_addr +
+           (last_mem_md->num_pages << EFI_PAGE_SHIFT);
+       paddr_start = (paddr_end - PAGE_SIZE) & PAGE_MASK;
+       last_mem_md->num_pages -=
+           (paddr_end - paddr_start) / (1UL << EFI_PAGE_SHIFT);
+
+       md = &tables->efi_memmap[*num_mds];
+       (*num_mds)++;
+       md->type = EFI_RUNTIME_SERVICES_DATA;
+       md->phys_addr = paddr_start;
+       md->virt_addr = 0;
+       md->num_pages = 1UL << (PAGE_SHIFT - EFI_PAGE_SHIFT);
+       md->attribute = EFI_MEMORY_WB;
+
+       memmap_info = domain_mpa_to_imva(d, md->phys_addr);
+       BUG_ON(*num_mds > NUM_MEM_DESCS);
+
+       memmap_info->efi_memdesc_size = sizeof(md[0]);
+       memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
+       memmap_info->efi_memmap_size = *num_mds * sizeof(md[0]);
+       memcpy(&memmap_info->memdesc, &tables->efi_memmap[0],
+              memmap_info->efi_memmap_size);
+       d->shared_info->arch.memmap_info_num_pages = 1;
+       d->shared_info->arch.memmap_info_pfn = md->phys_addr >> PAGE_SHIFT;
+
+       sort(tables->efi_memmap, *num_mds, sizeof(efi_memory_desc_t),
+            efi_mdt_cmp, NULL);
 }
 
 /* Complete the dom0 memmap.  */
 int __init
 complete_dom0_memmap(struct domain *d,
-                     struct fw_tables *tables,
-                     unsigned long maxmem,
-                     int num_mds)
-{
-    efi_memory_desc_t *md;
-    u64 addr;
-    void *efi_map_start, *efi_map_end, *p;
-    u64 efi_desc_size;
-    int i;
-    unsigned long dom_mem = maxmem - (d->tot_pages << PAGE_SHIFT);
-
-    /* Walk through all MDT entries.
-       Copy all interesting entries.  */
-    efi_map_start = __va(ia64_boot_param->efi_memmap);
-    efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
-    efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
-    for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
-        const efi_memory_desc_t *md = p;
-        efi_memory_desc_t *dom_md = &tables->efi_memmap[num_mds];
-        u64 start = md->phys_addr;
-        u64 size = md->num_pages << EFI_PAGE_SHIFT;
-        u64 end = start + size;
-        u64 mpaddr;
-        unsigned long flags;
-
-        switch (md->type) {
-        case EFI_RUNTIME_SERVICES_CODE:
-        case EFI_RUNTIME_SERVICES_DATA:
-        case EFI_ACPI_RECLAIM_MEMORY:
-        case EFI_ACPI_MEMORY_NVS:
-        case EFI_RESERVED_TYPE:
-            /*
-             * Map into dom0 - We must respect protection
-             * and cache attributes.  Not all of these pages
-             * are writable!!!
-             */
-            flags = ASSIGN_writable;    /* dummy - zero */
-            if (md->attribute & EFI_MEMORY_WP)
-                flags |= ASSIGN_readonly;
-            if ((md->attribute & EFI_MEMORY_UC) &&
-                !(md->attribute & EFI_MEMORY_WB))
-                flags |= ASSIGN_nocache;
-
-            assign_domain_mach_page(d, start, size, flags);
-
-            /* Fall-through.  */
-        case EFI_MEMORY_MAPPED_IO:
-            /* Will be mapped with ioremap.  */
-            /* Copy descriptor.  */
-            *dom_md = *md;
-            dom_md->virt_addr = 0;
-            num_mds++;
-            break;
-
-        case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
-            flags = ASSIGN_writable;    /* dummy - zero */
-            if (md->attribute & EFI_MEMORY_UC)
-                flags |= ASSIGN_nocache;
-
-            if (start > 0x1ffffffff0000000UL) {
-                mpaddr = 0x4000000000000UL - size;
-                printk(XENLOG_INFO "Remapping IO ports from "
-                       "%lx to %lx\n", start, mpaddr);
-            } else
-                mpaddr = start;
-
-            /* Map into dom0.  */
-            assign_domain_mmio_page(d, mpaddr, start, size, flags);
-            /* Copy descriptor.  */
-            *dom_md = *md;
-            dom_md->phys_addr = mpaddr;
-            dom_md->virt_addr = 0;
-            num_mds++;
-            break;
-
-        case EFI_CONVENTIONAL_MEMORY:
-        case EFI_LOADER_CODE:
-        case EFI_LOADER_DATA:
-        case EFI_BOOT_SERVICES_CODE:
-        case EFI_BOOT_SERVICES_DATA:
-            if (!(md->attribute & EFI_MEMORY_WB))
-                break;
-
-            start = max(FW_END_PADDR, start);
-            end = min(start + dom_mem, end);
-            if (end <= start)
-                break;
-
-            dom_md->type = EFI_CONVENTIONAL_MEMORY;
-            dom_md->phys_addr = start;
-            dom_md->virt_addr = 0;
-            dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
-            dom_md->attribute = EFI_MEMORY_WB;
-            num_mds++;
-
-            dom_mem -= dom_md->num_pages << EFI_PAGE_SHIFT;
-            break;
-
-        case EFI_UNUSABLE_MEMORY:
-        case EFI_PAL_CODE:
-            /*
-             * We don't really need these, but holes in the
-             * memory map may cause Linux to assume there are
-             * uncacheable ranges within a granule.
-             */
-            dom_md->type = EFI_UNUSABLE_MEMORY;
-            dom_md->phys_addr = start;
-            dom_md->virt_addr = 0;
-            dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
-            dom_md->attribute = EFI_MEMORY_WB;
-            num_mds++;
-            break;
-
-        default:
-            /* Print a warning but continue.  */
-            printk("complete_dom0_memmap: warning: "
-                   "unhandled MDT entry type %u\n", md->type);
-        }
-    }
-    BUG_ON(num_mds > NUM_MEM_DESCS);
-    
-    sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
-         efi_mdt_cmp, NULL);
-
-    /* setup_guest() @ libxc/xc_linux_build() arranges memory for domU.
-     * however no one arranges memory for dom0,
-     * instead we allocate pages manually.
-     */
-    for (i = 0; i < num_mds; i++) {
-        md = &tables->efi_memmap[i];
-
-        if (md->type == EFI_LOADER_DATA ||
-            md->type == EFI_PAL_CODE ||
-            md->type == EFI_CONVENTIONAL_MEMORY) {
-            unsigned long start = md->phys_addr & PAGE_MASK;
-            unsigned long end = md->phys_addr +
-                                (md->num_pages << EFI_PAGE_SHIFT);
-
-            if (end == start) {
-                /* md->num_pages = 0 is allowed. */
-                continue;
-            }
-            
-            for (addr = start; addr < end; addr += PAGE_SIZE)
-                assign_new_domain0_page(d, addr);
-        }
-    }
-    // Map low-memory holes & unmapped MMIO for legacy drivers
-    for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
-        if (domain_page_mapped(d, addr))
-            continue;
-        
-        if (efi_mmio(addr, PAGE_SIZE)) {
-            unsigned long flags;
-            flags = ASSIGN_writable | ASSIGN_nocache;
-            assign_domain_mmio_page(d, addr, addr, PAGE_SIZE, flags);
-        }
-    }
-    setup_dom0_memmap_info(d, tables, &num_mds);
-    return num_mds;
+                    struct fw_tables *tables,
+                    unsigned long maxmem, int num_mds)
+{
+       efi_memory_desc_t *md;
+       u64 addr;
+       void *efi_map_start, *efi_map_end, *p;
+       u64 efi_desc_size;
+       int i;
+       unsigned long dom_mem = maxmem - (d->tot_pages << PAGE_SHIFT);
+
+       /* Walk through all MDT entries.
+          Copy all interesting entries.  */
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+       for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+               const efi_memory_desc_t *md = p;
+               efi_memory_desc_t *dom_md = &tables->efi_memmap[num_mds];
+               u64 start = md->phys_addr;
+               u64 size = md->num_pages << EFI_PAGE_SHIFT;
+               u64 end = start + size;
+               u64 mpaddr;
+               unsigned long flags;
+
+               switch (md->type) {
+               case EFI_RUNTIME_SERVICES_CODE:
+               case EFI_RUNTIME_SERVICES_DATA:
+               case EFI_ACPI_RECLAIM_MEMORY:
+               case EFI_ACPI_MEMORY_NVS:
+               case EFI_RESERVED_TYPE:
+                       /*
+                        * Map into dom0 - We must respect protection
+                        * and cache attributes.  Not all of these pages
+                        * are writable!!!
+                        */
+                       flags = ASSIGN_writable;        /* dummy - zero */
+                       if (md->attribute & EFI_MEMORY_WP)
+                               flags |= ASSIGN_readonly;
+                       if ((md->attribute & EFI_MEMORY_UC) &&
+                           !(md->attribute & EFI_MEMORY_WB))
+                               flags |= ASSIGN_nocache;
+
+                       assign_domain_mach_page(d, start, size, flags);
+
+                       /* Fall-through.  */
+               case EFI_MEMORY_MAPPED_IO:
+                       /* Will be mapped with ioremap.  */
+                       /* Copy descriptor.  */
+                       *dom_md = *md;
+                       dom_md->virt_addr = 0;
+                       num_mds++;
+                       break;
+
+               case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+                       flags = ASSIGN_writable;        /* dummy - zero */
+                       if (md->attribute & EFI_MEMORY_UC)
+                               flags |= ASSIGN_nocache;
+
+                       if (start > 0x1ffffffff0000000UL) {
+                               mpaddr = 0x4000000000000UL - size;
+                               printk(XENLOG_INFO "Remapping IO ports from "
+                                      "%lx to %lx\n", start, mpaddr);
+                       } else
+                               mpaddr = start;
+
+                       /* Map into dom0.  */
+                       assign_domain_mmio_page(d, mpaddr, start, size, flags);
+                       /* Copy descriptor.  */
+                       *dom_md = *md;
+                       dom_md->phys_addr = mpaddr;
+                       dom_md->virt_addr = 0;
+                       num_mds++;
+                       break;
+
+               case EFI_CONVENTIONAL_MEMORY:
+               case EFI_LOADER_CODE:
+               case EFI_LOADER_DATA:
+               case EFI_BOOT_SERVICES_CODE:
+               case EFI_BOOT_SERVICES_DATA:
+                       if (!(md->attribute & EFI_MEMORY_WB))
+                               break;
+
+                       start = max(FW_END_PADDR, start);
+                       end = min(start + dom_mem, end);
+                       if (end <= start)
+                               break;
+
+                       dom_md->type = EFI_CONVENTIONAL_MEMORY;
+                       dom_md->phys_addr = start;
+                       dom_md->virt_addr = 0;
+                       dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
+                       dom_md->attribute = EFI_MEMORY_WB;
+                       num_mds++;
+
+                       dom_mem -= dom_md->num_pages << EFI_PAGE_SHIFT;
+                       break;
+
+               case EFI_UNUSABLE_MEMORY:
+               case EFI_PAL_CODE:
+                       /*
+                        * We don't really need these, but holes in the
+                        * memory map may cause Linux to assume there are
+                        * uncacheable ranges within a granule.
+                        */
+                       dom_md->type = EFI_UNUSABLE_MEMORY;
+                       dom_md->phys_addr = start;
+                       dom_md->virt_addr = 0;
+                       dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
+                       dom_md->attribute = EFI_MEMORY_WB;
+                       num_mds++;
+                       break;
+
+               default:
+                       /* Print a warning but continue.  */
+                       printk("complete_dom0_memmap: warning: "
+                              "unhandled MDT entry type %u\n", md->type);
+               }
+       }
+       BUG_ON(num_mds > NUM_MEM_DESCS);
+
+       sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
+            efi_mdt_cmp, NULL);
+
+       /* setup_guest() @ libxc/xc_linux_build() arranges memory for domU.
+        * however no one arranges memory for dom0,
+        * instead we allocate pages manually.
+        */
+       for (i = 0; i < num_mds; i++) {
+               md = &tables->efi_memmap[i];
+
+               if (md->type == EFI_LOADER_DATA ||
+                   md->type == EFI_PAL_CODE ||
+                   md->type == EFI_CONVENTIONAL_MEMORY) {
+                       unsigned long start = md->phys_addr & PAGE_MASK;
+                       unsigned long end = md->phys_addr +
+                           (md->num_pages << EFI_PAGE_SHIFT);
+
+                       if (end == start) {
+                               /* md->num_pages = 0 is allowed. */
+                               continue;
+                       }
+
+                       for (addr = start; addr < end; addr += PAGE_SIZE)
+                               assign_new_domain0_page(d, addr);
+               }
+       }
+       // Map low-memory holes & unmapped MMIO for legacy drivers
+       for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
+               if (domain_page_mapped(d, addr))
+                       continue;
+
+               if (efi_mmio(addr, PAGE_SIZE)) {
+                       unsigned long flags;
+                       flags = ASSIGN_writable | ASSIGN_nocache;
+                       assign_domain_mmio_page(d, addr, addr, PAGE_SIZE,
+                                               flags);
+               }
+       }
+       setup_dom0_memmap_info(d, tables, &num_mds);
+       return num_mds;
 }
 
 /*
  * Local variables:
  * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
  * End:
  */
diff -r 034f3e20ad10 -r 0e5c0cde23b1 xen/arch/ia64/xen/dom_fw_domu.c
--- a/xen/arch/ia64/xen/dom_fw_domu.c   Thu May 24 16:28:48 2007 -0600
+++ b/xen/arch/ia64/xen/dom_fw_domu.c   Fri May 25 09:33:01 2007 -0600
@@ -47,173 +47,179 @@
 #include <asm/dom_fw.h>
 #include <asm/dom_fw_domu.h>
 
-void
-efi_systable_init_domu(struct fw_tables *tables)
+void efi_systable_init_domu(struct fw_tables *tables)
 {
-    int i = 1;
-
-    printk(XENLOG_GUEST XENLOG_INFO "DomainU EFI build up:");
-
-    tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
-    tables->efi_tables[i].table = FW_ACPI_BASE_PADDR;
-    printk(" ACPI 2.0=0x%lx",tables->efi_tables[i].table);
-    i++;
-    printk("\n");
-    BUG_ON(i > NUM_EFI_SYS_TABLES);
+       int i = 1;
+
+       printk(XENLOG_GUEST XENLOG_INFO "DomainU EFI build up:");
+
+       tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
+       tables->efi_tables[i].table = FW_ACPI_BASE_PADDR;
+       printk(" ACPI 2.0=0x%lx", tables->efi_tables[i].table);
+       i++;
+       printk("\n");
+       BUG_ON(i > NUM_EFI_SYS_TABLES);
 }
 
 #define MAKE_MD(typ, attr, start, end) \
        xen_ia64_efi_make_md((tables), &(i), (typ), (attr), (start), (end))
 
 int
-complete_domu_memmap(domain_t *d,
-                     struct fw_tables *tables,
-                     unsigned long maxmem,
-                     int num_mds,
-                     unsigned long memmap_info_pfn,
-                     unsigned long memmap_info_num_pages)
+complete_domu_memmap(domain_t * d,
+                    struct fw_tables *tables,
+                    unsigned long maxmem,
+                    int num_mds,
+                    unsigned long memmap_info_pfn,
+                    unsigned long memmap_info_num_pages)
 {
-    efi_memory_desc_t *md;
-    int i = num_mds; /* for MAKE_MD */
-    int create_memmap = 0;
-    xen_ia64_memmap_info_t* memmap_info;
-    unsigned long memmap_info_size;
-    unsigned long paddr_start;
-    unsigned long paddr_end;
-    void *p;
-    void *memmap_start;
-    void *memmap_end;
-
-    if (memmap_info_pfn == 0 || memmap_info_num_pages == 0) {
-        /* old domain builder which doesn't setup
-         * memory map. create it for compatibility */
-        memmap_info_pfn = (maxmem >> PAGE_SHIFT) - 1;
-        memmap_info_num_pages = 1;
-        create_memmap = 1;
-    }
-
-    memmap_info_size = memmap_info_num_pages << PAGE_SHIFT;
-    paddr_start = memmap_info_pfn << PAGE_SHIFT;
-    /* 3 = start info page, xenstore page and console page */
-    paddr_end = paddr_start + memmap_info_size + 3 * PAGE_SIZE;
-    memmap_info = xen_ia64_dom_fw_map(d, paddr_start);
-
-    if (memmap_info->efi_memmap_size == 0) {
-        create_memmap = 1;
-    } else if (memmap_info->efi_memdesc_size != sizeof(md[0]) ||
-               memmap_info->efi_memdesc_version !=
-               EFI_MEMORY_DESCRIPTOR_VERSION) {
-        printk(XENLOG_WARNING
-               "%s: Warning: unknown memory map "
-               "memmap size %"PRIu64" "
-               "memdesc size %"PRIu64" "
-               "version %"PRIu32"\n",
-               __func__,
-               memmap_info->efi_memmap_size,
-               memmap_info->efi_memdesc_size,
-               memmap_info->efi_memdesc_version);
-        create_memmap = 1;
-    } else if (memmap_info_size < memmap_info->efi_memmap_size) {
-        printk(XENLOG_WARNING
-               "%s: Warning: too short memmap info size %"PRIu64"\n",
-               __func__, memmap_info_size);
-        xen_ia64_dom_fw_unmap(d, memmap_info);
-        return -EINVAL;
-    } else if (memmap_info->efi_memmap_size >
-           PAGE_SIZE - sizeof(*memmap_info)) {
-        /*
-         * curently memmap spanning more than single page isn't
-         * supported.
-         */
-        printk(XENLOG_WARNING
-               "%s: Warning: too large efi_memmap_size %"PRIu64"\n",
-               __func__, memmap_info->efi_memmap_size);
-        xen_ia64_dom_fw_unmap(d, memmap_info);
-        return -ENOSYS;
-    }
-    
-    if (create_memmap) {
-        /*
-         * old domain builder which doesn't setup
-         * memory map. create it for compatibility
-         */
-        memmap_info->efi_memdesc_size = sizeof(md[0]);
-        memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
-        memmap_info->efi_memmap_size = 1 * sizeof(md[0]);
-
-        md = (efi_memory_desc_t*)&memmap_info->memdesc;
-        md[num_mds].type = EFI_CONVENTIONAL_MEMORY;
-        md[num_mds].pad = 0;
-        md[num_mds].phys_addr = 0;
-        md[num_mds].virt_addr = 0;
-        md[num_mds].num_pages = maxmem >> EFI_PAGE_SHIFT;
-        md[num_mds].attribute = EFI_MEMORY_WB;
-    }
-
-    memmap_start = &memmap_info->memdesc;
-    memmap_end = memmap_start + memmap_info->efi_memmap_size;
-
-    /* XXX Currently the table must be in a single page. */
-    if ((unsigned long)memmap_end > (unsigned long)memmap_info + PAGE_SIZE) {
-        xen_ia64_dom_fw_unmap(d, memmap_info);
-        return -EINVAL;
-    }
-
-    /* sort it bofore use
-     * XXX: this is created by user space domain builder so that
-     * we should check its integrity */
-    sort(&memmap_info->memdesc,
-         memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
-         memmap_info->efi_memdesc_size,
-         efi_mdt_cmp, NULL);
-
-    for (p = memmap_start; p < memmap_end; p += memmap_info->efi_memdesc_size) {
-        unsigned long start;
-        unsigned long end;
-
-        md = p;
-        start = md->phys_addr;
-        end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
-        if (start < FW_END_PADDR)
-            start = FW_END_PADDR;
-        if (end <= start)
-            continue;
-
-        /* exclude [paddr_start, paddr_end) */
-        if (paddr_end <= start || end <= paddr_start) {
-            MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start, end);
-        } else if (paddr_start <= start && paddr_end < end) {
-            MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, paddr_end, end);
-        } else if (start < paddr_start && end <= paddr_end) {
-            MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start, paddr_start);
-        } else {
-            MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start, paddr_start);
-            MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, paddr_end, end);
-        }
-    }
-
-    /* memmap info page. */
-    MAKE_MD(EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB, paddr_start, paddr_end);
-
-    /* Create an entry for IO ports.  */
-    MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
-            IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);
-
-    num_mds = i;
-    sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
-         efi_mdt_cmp, NULL);
-
-    xen_ia64_dom_fw_unmap(d, memmap_info);
-    return num_mds;
+       efi_memory_desc_t *md;
+       int i = num_mds;        /* for MAKE_MD */
+       int create_memmap = 0;
+       xen_ia64_memmap_info_t *memmap_info;
+       unsigned long memmap_info_size;
+       unsigned long paddr_start;
+       unsigned long paddr_end;
+       void *p;
+       void *memmap_start;
+       void *memmap_end;
+
+       if (memmap_info_pfn == 0 || memmap_info_num_pages == 0) {
+               /* old domain builder which doesn't setup
+                * memory map. create it for compatibility */
+               memmap_info_pfn = (maxmem >> PAGE_SHIFT) - 1;
+               memmap_info_num_pages = 1;
+               create_memmap = 1;
+       }
+
+       memmap_info_size = memmap_info_num_pages << PAGE_SHIFT;
+       paddr_start = memmap_info_pfn << PAGE_SHIFT;
+       /* 3 = start info page, xenstore page and console page */
+       paddr_end = paddr_start + memmap_info_size + 3 * PAGE_SIZE;
+       memmap_info = xen_ia64_dom_fw_map(d, paddr_start);
+
+       if (memmap_info->efi_memmap_size == 0) {
+               create_memmap = 1;
+       } else if (memmap_info->efi_memdesc_size != sizeof(md[0]) ||
+                  memmap_info->efi_memdesc_version !=
+                  EFI_MEMORY_DESCRIPTOR_VERSION) {
+               printk(XENLOG_WARNING
+                      "%s: Warning: unknown memory map "
+                      "memmap size %" PRIu64 " "
+                      "memdesc size %" PRIu64 " "
+                      "version %" PRIu32 "\n",
+                      __func__,
+                      memmap_info->efi_memmap_size,
+                      memmap_info->efi_memdesc_size,
+                      memmap_info->efi_memdesc_version);
+               create_memmap = 1;
+       } else if (memmap_info_size < memmap_info->efi_memmap_size) {
+               printk(XENLOG_WARNING
+                      "%s: Warning: too short memmap info size %" PRIu64 "\n",
+                      __func__, memmap_info_size);
+               xen_ia64_dom_fw_unmap(d, memmap_info);
+               return -EINVAL;
+       } else if (memmap_info->efi_memmap_size >
+                  PAGE_SIZE - sizeof(*memmap_info)) {
+               /*
+                * curently memmap spanning more than single page isn't
+                * supported.
+                */
+               printk(XENLOG_WARNING
+                      "%s: Warning: too large efi_memmap_size %" PRIu64 "\n",
+                      __func__, memmap_info->efi_memmap_size);
+               xen_ia64_dom_fw_unmap(d, memmap_info);
+               return -ENOSYS;
+       }
+
+       if (create_memmap) {
+               /*
+                * old domain builder which doesn't setup
+                * memory map. create it for compatibility
+                */
+               memmap_info->efi_memdesc_size = sizeof(md[0]);
+               memmap_info->efi_memdesc_version =
+                   EFI_MEMORY_DESCRIPTOR_VERSION;
+               memmap_info->efi_memmap_size = 1 * sizeof(md[0]);
+
+               md = (efi_memory_desc_t *) & memmap_info->memdesc;
+               md[num_mds].type = EFI_CONVENTIONAL_MEMORY;
+               md[num_mds].pad = 0;
+               md[num_mds].phys_addr = 0;
+               md[num_mds].virt_addr = 0;
+               md[num_mds].num_pages = maxmem >> EFI_PAGE_SHIFT;
+               md[num_mds].attribute = EFI_MEMORY_WB;
+       }
+
+       memmap_start = &memmap_info->memdesc;
+       memmap_end = memmap_start + memmap_info->efi_memmap_size;
+
+       /* XXX Currently the table must be in a single page. */
+       if ((unsigned long)memmap_end > (unsigned long)memmap_info + PAGE_SIZE) {
+               xen_ia64_dom_fw_unmap(d, memmap_info);
+               return -EINVAL;
+       }
+
+       /* sort it bofore use
+        * XXX: this is created by user space domain builder so that
+        * we should check its integrity */
+       sort(&memmap_info->memdesc,
+            memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
+            memmap_info->efi_memdesc_size, efi_mdt_cmp, NULL);
+
+       for (p = memmap_start; p < memmap_end;
+            p += memmap_info->efi_memdesc_size) {
+               unsigned long start;
+               unsigned long end;
+
+               md = p;
+               start = md->phys_addr;
+               end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+               if (start < FW_END_PADDR)
+                       start = FW_END_PADDR;
+               if (end <= start)
+                       continue;
+
+               /* exclude [paddr_start, paddr_end) */
+               if (paddr_end <= start || end <= paddr_start) {
+                       MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start,
+                               end);
+               } else if (paddr_start <= start && paddr_end < end) {
+                       MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
+                               paddr_end, end);
+               } else if (start < paddr_start && end <= paddr_end) {
+                       MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start,
+                               paddr_start);
+               } else {
+                       MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB, start,
+                               paddr_start);
+                       MAKE_MD(EFI_CONVENTIONAL_MEMORY, EFI_MEMORY_WB,
+                               paddr_end, end);
+               }
+       }
+
+       /* memmap info page. */
+       MAKE_MD(EFI_RUNTIME_SERVICES_DATA, EFI_MEMORY_WB, paddr_start,
+               paddr_end);
+
+       /* Create an entry for IO ports.  */
+       MAKE_MD(EFI_MEMORY_MAPPED_IO_PORT_SPACE, EFI_MEMORY_UC,
+               IO_PORTS_PADDR, IO_PORTS_PADDR + IO_PORTS_SIZE);
+
+       num_mds = i;
+       sort(tables->efi_memmap, num_mds, sizeof(efi_memory_desc_t),
+            efi_mdt_cmp, NULL);
+
+       xen_ia64_dom_fw_unmap(d, memmap_info);
+       return num_mds;
 }
 
 /*
  * Local variables:
  * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
  * End:
  */
diff -r 034f3e20ad10 -r 0e5c0cde23b1 xen/arch/ia64/xen/dom_fw_utils.c
--- a/xen/arch/ia64/xen/dom_fw_utils.c  Thu May 24 16:28:48 2007 -0600
+++ b/xen/arch/ia64/xen/dom_fw_utils.c  Fri May 25 09:33:01 2007 -0600
@@ -30,103 +30,99 @@
 
 #include <linux/sort.h>
 
-uint32_t
-xen_ia64_version(struct domain *unused)
-{
-    return (xen_major_version() << 16) | xen_minor_version();
-}
-
-int
-xen_ia64_fpswa_revision(struct domain *d, unsigned int *revision)
-{
-    if (fpswa_interface == NULL)
-        return -ENOSYS;
-
-    *revision = fpswa_interface->revision;
-    return 0;
-}
-
-int
-xen_ia64_is_vcpu_allocated(struct domain *d, uint32_t vcpu)
-{
-    return d->vcpu[vcpu] != NULL;
+uint32_t xen_ia64_version(struct domain *unused)
+{
+       return (xen_major_version() << 16) | xen_minor_version();
+}
+
+int xen_ia64_fpswa_revision(struct domain *d, unsigned int *revision)
+{
+       if (fpswa_interface == NULL)
+               return -ENOSYS;
+
+       *revision = fpswa_interface->revision;
+       return 0;
+}
+
+int xen_ia64_is_vcpu_allocated(struct domain *d, uint32_t vcpu)
+{
+       return d->vcpu[vcpu] != NULL;
 }
 
 int xen_ia64_is_running_on_sim(struct domain *unused)
 {
-    extern unsigned long running_on_sim;
-    return running_on_sim;
-}
-
-int
-xen_ia64_is_dom0(struct domain *d)
-{
-    return d == dom0;
-}
-
-static void
-dom_fw_domain_init(struct domain *d, struct fw_tables *tables)
-{
-    /* Initialise for EFI_SET_VIRTUAL_ADDRESS_MAP emulation */
-    d->arch.efi_runtime = &tables->efi_runtime;
-    d->arch.fpswa_inf   = &tables->fpswa_inf;
-    d->arch.sal_data    = &tables->sal_data;
-}
-
-static int
-dom_fw_set_convmem_end(struct domain *d)
-{
-    xen_ia64_memmap_info_t* memmap_info;
-    efi_memory_desc_t *md;
-    void *p;
-    void *memmap_start;
-    void *memmap_end;
-
-    if (d->shared_info->arch.memmap_info_pfn == 0)
-        return -EINVAL;
-
-    memmap_info = domain_mpa_to_imva(d, d->shared_info->arch.memmap_info_pfn << PAGE_SHIFT);
-    if (memmap_info->efi_memmap_size == 0 ||
-        memmap_info->efi_memdesc_size != sizeof(*md) ||
-        memmap_info->efi_memdesc_version !=
-        EFI_MEMORY_DESCRIPTOR_VERSION)
-        return -EINVAL;
-
-    /* only 1page case is supported */
-    if (d->shared_info->arch.memmap_info_num_pages != 1)
-        return -ENOSYS;
-
-    memmap_start = &memmap_info->memdesc;
-    memmap_end = memmap_start + memmap_info->efi_memmap_size;
-
-    /* XXX Currently the table must be in a single page. */
-    if ((unsigned long)memmap_end > (unsigned long)memmap_info + PAGE_SIZE)
-        return -EINVAL;
-
-    /* sort it bofore use
-     * XXX: this is created by user space domain builder so that
-     * we should check its integrity */
-    sort(&memmap_info->memdesc,
-         memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
-         memmap_info->efi_memdesc_size,
-         efi_mdt_cmp, NULL);
-
-    if (d->arch.convmem_end == 0)
-        d->arch.convmem_end = d->max_pages << PAGE_SHIFT;
-
-    for (p = memmap_start; p < memmap_end; p += memmap_info->efi_memdesc_size) {
-        unsigned long end;
-
-        md = p;
-        end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-
-        if (md->attribute == EFI_MEMORY_WB &&
-            md->type == EFI_CONVENTIONAL_MEMORY &&
-            md->num_pages > 0 &&
-            d->arch.convmem_end < end)
-            d->arch.convmem_end = end;
-    }
-    return 0;
+       extern unsigned long running_on_sim;
+       return running_on_sim;
+}
+
+int xen_ia64_is_dom0(struct domain *d)
+{
+       return d == dom0;
+}
+
+static void dom_fw_domain_init(struct domain *d, struct fw_tables *tables)
+{
+       /* Initialise for EFI_SET_VIRTUAL_ADDRESS_MAP emulation */
+       d->arch.efi_runtime = &tables->efi_runtime;
+       d->arch.fpswa_inf = &tables->fpswa_inf;
+       d->arch.sal_data = &tables->sal_data;
+}
+
+static int dom_fw_set_convmem_end(struct domain *d)
+{
+       xen_ia64_memmap_info_t *memmap_info;
+       efi_memory_desc_t *md;
+       void *p;
+       void *memmap_start;
+       void *memmap_end;
+
+       if (d->shared_info->arch.memmap_info_pfn == 0)
+               return -EINVAL;
+
+       memmap_info =
+           domain_mpa_to_imva(d,
+                              d->shared_info->arch.
+                              memmap_info_pfn << PAGE_SHIFT);
+       if (memmap_info->efi_memmap_size == 0
+           || memmap_info->efi_memdesc_size != sizeof(*md)
+           || memmap_info->efi_memdesc_version !=
+           EFI_MEMORY_DESCRIPTOR_VERSION)
+               return -EINVAL;
+
+       /* only 1page case is supported */
+       if (d->shared_info->arch.memmap_info_num_pages != 1)
+               return -ENOSYS;
+
+       memmap_start = &memmap_info->memdesc;
+       memmap_end = memmap_start + memmap_info->efi_memmap_size;
+
+       /* XXX Currently the table must be in a single page. */
+       if ((unsigned long)memmap_end > (unsigned long)memmap_info + PAGE_SIZE)
+               return -EINVAL;
+
+       /* sort it bofore use
+        * XXX: this is created by user space domain builder so that
+        * we should check its integrity */
+       sort(&memmap_info->memdesc,
+            memmap_info->efi_memmap_size / memmap_info->efi_memdesc_size,
+            memmap_info->efi_memdesc_size, efi_mdt_cmp, NULL);
+
+       if (d->arch.convmem_end == 0)
+               d->arch.convmem_end = d->max_pages << PAGE_SHIFT;
+
+       for (p = memmap_start; p < memmap_end;
+            p += memmap_info->efi_memdesc_size) {
+               unsigned long end;
+
+               md = p;
+               end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+               if (md->attribute == EFI_MEMORY_WB &&
+                   md->type == EFI_CONVENTIONAL_MEMORY &&
+                   md->num_pages > 0 && d->arch.convmem_end < end)
+                       d->arch.convmem_end = end;
+       }
+       return 0;
 }
 
 /* allocate a page for fw
@@ -135,100 +131,101 @@ static inline void
 static inline void
 assign_new_domain_page_if_dom0(struct domain *d, unsigned long mpaddr)
 {
-    if (d == dom0)
-        assign_new_domain0_page(d, mpaddr);
+       if (d == dom0)
+               assign_new_domain0_page(d, mpaddr);
 }
 
 static void
 dom_fw_setup_for_domain_restore(domain_t *d, unsigned long maxmem)
 {
-    assign_new_domain_page(d, FW_HYPERCALL_BASE_PADDR);
-    dom_fw_domain_init(d, domain_mpa_to_imva(d, FW_TABLES_BASE_PADDR));
-    d->arch.convmem_end = maxmem;
-}
-
-int
-dom_fw_setup(domain_t *d, unsigned long bp_mpa, unsigned long maxmem)
-{
-    int old_domu_builder = 0;
-    struct xen_ia64_boot_param *bp;
-    struct fw_tables *imva_tables_base;
-
-    BUILD_BUG_ON(sizeof(struct fw_tables) >
-                 (FW_TABLES_END_PADDR - FW_TABLES_BASE_PADDR));
-
-    if (bp_mpa == 0) {
-        /* bp_mpa == 0 means this is domain restore case. */
-        dom_fw_setup_for_domain_restore(d, maxmem);
-        return 0;
-    }
-
-    /* Create page for boot_param.  */
-    assign_new_domain_page_if_dom0(d, bp_mpa);
-    bp = domain_mpa_to_imva(d, bp_mpa);
-    if (d != dom0) {
-        /*
-         * XXX kludge.
-         * when XEN_DOMCTL_arch_setup is called, shared_info can't
-         * be accessed by libxc so that memmap_info_pfn isn't
-         * initialized. But dom_fw_set_convmem_end() requires it, 
-         * so here we initialize it.
-         * note: domain builder may overwrite memmap_info_num_pages,
-         *       memmap_info_pfns later.
-         */
-        if (bp->efi_memmap_size == 0 || 
-            XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp) == 0 ||
-            XEN_IA64_MEMMAP_INFO_PFN(bp) == 0) {
-            /* old domain builder compatibility */
-            d->shared_info->arch.memmap_info_num_pages = 1;
-            d->shared_info->arch.memmap_info_pfn = (maxmem >> PAGE_SHIFT) - 1;
-            old_domu_builder = 1;
-        } else {
-            d->shared_info->arch.memmap_info_num_pages =
-                XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp);
-            d->shared_info->arch.memmap_info_pfn =
-                XEN_IA64_MEMMAP_INFO_PFN(bp);
-            /* currently multi page memmap isn't supported */
-            if (d->shared_info->arch.memmap_info_num_pages != 1)
-                return -ENOSYS;
-        }
-    }
-
-    /* Create page for FW tables.  */
-    assign_new_domain_page_if_dom0(d, FW_TABLES_BASE_PADDR);
-    imva_tables_base = (struct fw_tables *)domain_mpa_to_imva
-                                      (d, FW_TABLES_BASE_PADDR);
-    /* Create page for acpi tables.  */
-    if (d != dom0 && old_domu_builder) {
-        struct fake_acpi_tables *imva;
-        imva = domain_mpa_to_imva(d, FW_ACPI_BASE_PADDR);
-        dom_fw_fake_acpi(d, imva);
-    }
-    if (d == dom0 || old_domu_builder) {
-        int ret;
-        unsigned long imva_hypercall_base;
-
-        /* Create page for hypercalls.  */
-        assign_new_domain_page_if_dom0(d, FW_HYPERCALL_BASE_PADDR);
-        imva_hypercall_base = (unsigned long)domain_mpa_to_imva
-            (d, FW_HYPERCALL_BASE_PADDR);
-
-        ret = dom_fw_init(d, d->arch.breakimm, bp,
-                          imva_tables_base, imva_hypercall_base, maxmem);
-        if (ret < 0)
-            return ret;
-    }
-
-    dom_fw_domain_init(d, imva_tables_base);
-    return dom_fw_set_convmem_end(d);
+       assign_new_domain_page(d, FW_HYPERCALL_BASE_PADDR);
+       dom_fw_domain_init(d, domain_mpa_to_imva(d, FW_TABLES_BASE_PADDR));
+       d->arch.convmem_end = maxmem;
+}
+
+int dom_fw_setup(domain_t * d, unsigned long bp_mpa, unsigned long maxmem)
+{
+       int old_domu_builder = 0;
+       struct xen_ia64_boot_param *bp;
+       struct fw_tables *imva_tables_base;
+
+       BUILD_BUG_ON(sizeof(struct fw_tables) >
+                    (FW_TABLES_END_PADDR - FW_TABLES_BASE_PADDR));
+
+       if (bp_mpa == 0) {
+               /* bp_mpa == 0 means this is domain restore case. */
+               dom_fw_setup_for_domain_restore(d, maxmem);
+               return 0;
+       }
+
+       /* Create page for boot_param.  */
+       assign_new_domain_page_if_dom0(d, bp_mpa);
+       bp = domain_mpa_to_imva(d, bp_mpa);
+       if (d != dom0) {
+               /*
+                * XXX kludge.
+                * when XEN_DOMCTL_arch_setup is called, shared_info can't
+                * be accessed by libxc so that memmap_info_pfn isn't
+                * initialized. But dom_fw_set_convmem_end() requires it, 
+                * so here we initialize it.
+                * note: domain builder may overwrite memmap_info_num_pages,
+                *       memmap_info_pfns later.
+                */
+               if (bp->efi_memmap_size == 0 ||
+                   XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp) == 0 ||
+                   XEN_IA64_MEMMAP_INFO_PFN(bp) == 0) {
+                       /* old domain builder compatibility */
+                       d->shared_info->arch.memmap_info_num_pages = 1;
+                       d->shared_info->arch.memmap_info_pfn =
+                           (maxmem >> PAGE_SHIFT) - 1;
+                       old_domu_builder = 1;
+               } else {
+                       d->shared_info->arch.memmap_info_num_pages =
+                           XEN_IA64_MEMMAP_INFO_NUM_PAGES(bp);
+                       d->shared_info->arch.memmap_info_pfn =
+                           XEN_IA64_MEMMAP_INFO_PFN(bp);
+                       /* currently multi page memmap isn't supported */
+                       if (d->shared_info->arch.memmap_info_num_pages != 1)
+                               return -ENOSYS;
+               }
+       }
+
+       /* Create page for FW tables.  */
+       assign_new_domain_page_if_dom0(d, FW_TABLES_BASE_PADDR);
+       imva_tables_base = (struct fw_tables *)domain_mpa_to_imva
+           (d, FW_TABLES_BASE_PADDR);
+       /* Create page for acpi tables.  */
+       if (d != dom0 && old_domu_builder) {
+               struct fake_acpi_tables *imva;
+               imva = domain_mpa_to_imva(d, FW_ACPI_BASE_PADDR);
+               dom_fw_fake_acpi(d, imva);
+       }
+       if (d == dom0 || old_domu_builder) {
+               int ret;
+               unsigned long imva_hypercall_base;
+
+               /* Create page for hypercalls.  */
+               assign_new_domain_page_if_dom0(d, FW_HYPERCALL_BASE_PADDR);
+               imva_hypercall_base = (unsigned long)domain_mpa_to_imva
+                   (d, FW_HYPERCALL_BASE_PADDR);
+
+               ret = dom_fw_init(d, d->arch.breakimm, bp,
+                                 imva_tables_base, imva_hypercall_base,
+                                 maxmem);
+               if (ret < 0)
+                       return ret;
+       }
+
+       dom_fw_domain_init(d, imva_tables_base);
+       return dom_fw_set_convmem_end(d);
 }
 
 /*
  * Local variables:
  * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
+ * c-set-style: "linux"
+ * c-basic-offset: 8
+ * tab-width: 8
+ * indent-tabs-mode: t
  * End:
  */

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

