
[Xen-ia64-devel] [patch 4/5] sn2+machvec support


  • To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
  • From: Jes Sorensen <jes@xxxxxxx>
  • Date: Wed, 13 Dec 2006 18:12:31 +0100
  • Delivery-date: Wed, 13 Dec 2006 09:12:35 -0800
  • List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>

Changes required for the new sn2+machvec files to build under Xen.

Cheers,
Jes
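Most of this patch follows a single pattern: code Xen cannot use (RCU, sysfs
attributes, PCI device fixup, VGA and /proc support) is fenced off with
#ifndef XEN, and where a replacement is needed a Xen-side variant is supplied
under #ifdef XEN -- most visibly in irq.c, where the RCU-protected list walks
become plain list walks under sn_irq_info_lock. The fragment below is only an
illustrative, hypothetical sketch of that pattern, not code from the patch;
the toy list, the pthread mutex standing in for the spinlock, and the function
names are all invented for the example.

/* Hedged illustration only -- not part of the patch.  It mimics the
 * "#ifdef XEN" split used in irq.c: Linux walks the per-IRQ lists under
 * rcu_read_lock(), while the Xen build walks the same lists while holding
 * a spinlock (sn_irq_info_lock in the patch).  A pthread mutex stands in
 * for the spinlock so the sketch builds as ordinary user-space C.
 */
#include <pthread.h>
#include <stdio.h>

struct irq_node {
        int cpu;
        struct irq_node *next;          /* toy singly linked list */
};

static struct irq_node n1 = { 1, NULL };
static struct irq_node n0 = { 0, &n1 };
static struct irq_node *irq_list = &n0;

static pthread_mutex_t irq_list_lock = PTHREAD_MUTEX_INITIALIZER;

static int find_cpu(int cpu)
{
        struct irq_node *p;
        int found = 0;

#ifdef XEN
        /* Xen path: no RCU, hold the list lock for the whole walk. */
        pthread_mutex_lock(&irq_list_lock);
#else
        /* The Linux path would take rcu_read_lock() here instead. */
#endif
        for (p = irq_list; p; p = p->next)
                if (p->cpu == cpu) {
                        found = 1;
                        break;
                }
#ifdef XEN
        pthread_mutex_unlock(&irq_list_lock);
#endif
        return found;
}

int main(void)
{
        printf("cpu 1 %s\n", find_cpu(1) ? "found" : "missing");
        return 0;
}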
# HG changeset patch
# User jes@xxxxxxxxxxxxxxxx
# Date 1166027418 -3600
# Node ID 10c1f9a049e3f6eaceda7b29200fff0a020cf325
# Parent  f936608bf9f88acae521a9cbe86ae253eede6fde
Update the new files so that the sn2 and machvec support builds. In addition,
update sn_sal.h and list.h to bring them in sync with linux-2.6.19.

diff -r f936608bf9f8 -r 10c1f9a049e3 xen/arch/ia64/linux-xen/sn/kernel/io_init.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/io_init.c       Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c       Wed Dec 13 17:30:18 2006 +0100
@@ -8,6 +8,7 @@
 
 #include <linux/bootmem.h>
 #include <linux/nodemask.h>
+#include <linux/init.h>
 #include <asm/sn/types.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/sn_feature_sets.h>
@@ -17,17 +18,29 @@
 #include <asm/sn/module.h>
 #include <asm/sn/pcibr_provider.h>
 #include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
 #include <asm/sn/pcidev.h>
+#endif
 #include <asm/sn/simulator.h>
 #include <asm/sn/sn_sal.h>
+#ifndef XEN
 #include <asm/sn/tioca_provider.h>
 #include <asm/sn/tioce_provider.h>
+#endif
+#ifdef XEN
+#include "asm/sn/hubdev.h"
+#include "asm/sn/xwidgetdev.h"
+#else
 #include "xtalk/hubdev.h"
 #include "xtalk/xwidgetdev.h"
+#endif
 
 
 extern void sn_init_cpei_timer(void);
 extern void register_sn_procfs(void);
+#ifdef XEN
+extern void sn_irq_lh_init(void);
+#endif
 
 static struct list_head sn_sysdata_list;
 
@@ -50,6 +63,7 @@ int sn_ioif_inited;           /* SN I/O infrastru
 
 struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];      /* indexed by asic type */
 
+#ifndef XEN
 static int max_segment_number;          /* Default highest segment number */
 static int max_pcibus_number = 255;    /* Default highest pci bus number */
 
@@ -81,6 +95,7 @@ static struct sn_pcibus_provider sn_pci_
        .dma_unmap = sn_default_pci_unmap,
        .bus_fixup = sn_default_pci_bus_fixup,
 };
+#endif
 
 /*
  * Retrieve the DMA Flush List given nasid, widget, and device.
@@ -131,6 +146,7 @@ static inline u64 sal_get_pcibus_info(u6
        return ret_stuff.v0;
 }
 
+#ifndef XEN
 /*
  * Retrieve the pci device information given the bus and device|function number.
  */
@@ -281,9 +297,13 @@ static void __init sn_fixup_ionodes(void
                                                     nasid, widget, device,
                                                     (u64)(dev_entry->common));
                                else
+#ifdef XEN
+                                       BUG();
+#else
                                        status = sn_device_fixup_war(nasid,
                                                     widget, device,
                                                     dev_entry->common);
+#endif
                                if (status != SALRET_OK)
                                        panic("SAL call failed: %s\n",
                                              ia64_sal_strerror(status));
@@ -614,6 +634,7 @@ void sn_bus_free_sysdata(void)
        }
        return;
 }
+#endif
 
 /*
  * Ugly hack to get PCI setup until we have a proper ACPI namespace.
@@ -629,6 +650,7 @@ static int __init sn_pci_init(void)
        if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
                return 0;
 
+#ifndef XEN
        /*
         * prime sn_pci_provider[].  Individial provider init routines will
         * override their respective default entries.
@@ -640,14 +662,18 @@ static int __init sn_pci_init(void)
        pcibr_init_provider();
        tioca_init_provider();
        tioce_init_provider();
+#endif
 
        /*
         * This is needed to avoid bounce limit checks in the blk layer
         */
        ia64_max_iommu_merge_mask = ~PAGE_MASK;
+#ifndef XEN
        sn_fixup_ionodes();
+#endif
        sn_irq_lh_init();
        INIT_LIST_HEAD(&sn_sysdata_list);
+#ifndef XEN
        sn_init_cpei_timer();
 
 #ifdef CONFIG_PROC_FS
@@ -668,6 +694,7 @@ static int __init sn_pci_init(void)
        while ((pci_dev =
                pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
                sn_pci_fixup_slot(pci_dev);
+#endif
 
        sn_ioif_inited = 1;     /* sn I/O infrastructure now initialized */
 
@@ -682,7 +709,7 @@ void hubdev_init_node(nodepda_t * npda, 
 {
        struct hubdev_info *hubdev_info;
        int size;
-       pg_data_t *pg;
+       struct pglist_data *pg;
 
        size = sizeof(struct hubdev_info);
 
@@ -705,6 +732,7 @@ cnodeid_get_geoid(cnodeid_t cnode)
        return hubdev->hdi_geoid;
 }
 
+#ifndef XEN
 void sn_generate_path(struct pci_bus *pci_bus, char *address)
 {
        nasid_t nasid;
@@ -730,11 +758,18 @@ void sn_generate_path(struct pci_bus *pc
            (bricktype == L1_BRICKTYPE_1932))
                        sprintf(address, "%s^%d", address, geo_slot(geoid));
 }
-
+#endif
+
+#ifdef XEN
+__initcall(sn_pci_init);
+#else
 subsys_initcall(sn_pci_init);
+#endif
+#ifndef XEN
 EXPORT_SYMBOL(sn_pci_fixup_slot);
 EXPORT_SYMBOL(sn_pci_unfixup_slot);
 EXPORT_SYMBOL(sn_pci_controller_fixup);
 EXPORT_SYMBOL(sn_bus_store_sysdata);
 EXPORT_SYMBOL(sn_bus_free_sysdata);
 EXPORT_SYMBOL(sn_generate_path);
+#endif
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/arch/ia64/linux-xen/sn/kernel/iomv.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/iomv.c  Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/iomv.c  Wed Dec 13 17:30:18 2006 +0100
@@ -9,7 +9,9 @@
 #include <linux/module.h>
 #include <asm/io.h>
 #include <asm/delay.h>
+#ifndef XEN
 #include <asm/vga.h>
+#endif
 #include <asm/sn/nodepda.h>
 #include <asm/sn/simulator.h>
 #include <asm/sn/pda.h>
@@ -18,6 +20,10 @@
 
 #define IS_LEGACY_VGA_IOPORT(p) \
        (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
+
+#ifdef XEN
+#define vga_console_iobase     0
+#endif
 
 /**
  * sn_io_addr - convert an in/out port to an i/o address
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/arch/ia64/linux-xen/sn/kernel/irq.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/irq.c   Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/irq.c   Wed Dec 13 17:30:18 2006 +0100
@@ -11,14 +11,22 @@
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/hw_irq.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibr_provider.h>
 #include <asm/sn/pcibus_provider_defs.h>
+#ifndef XEN
 #include <asm/sn/pcidev.h>
+#endif
 #include <asm/sn/shub_mmr.h>
 #include <asm/sn/sn_sal.h>
+
+#ifdef XEN
+#define move_native_irq(foo)   do{}while(0)
+#endif
 
 static void force_interrupt(int irq);
 static void register_intr_pda(struct sn_irq_info *sn_irq_info);
@@ -111,6 +119,7 @@ static void sn_end_irq(unsigned int irq)
                force_interrupt(irq);
 }
 
+#ifndef XEN
 static void sn_irq_info_free(struct rcu_head *head);
 
 struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
@@ -175,9 +184,15 @@ struct sn_irq_info *sn_retarget_vector(s
                (pci_provider->target_interrupt)(new_irq_info);
 
        spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+       list_replace(&sn_irq_info->list, &new_irq_info->list);
+#else
        list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
-       spin_unlock(&sn_irq_info_lock);
+#endif
+       spin_unlock(&sn_irq_info_lock);
+#ifndef XEN
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
 
 #ifdef CONFIG_SMP
        set_irq_affinity_info((vector & 0xff), cpuphys, 0);
@@ -199,16 +214,21 @@ static void sn_set_affinity_irq(unsigned
                                 sn_irq_lh[irq], list)
                (void)sn_retarget_vector(sn_irq_info, nasid, slice);
 }
+#endif
 
 struct hw_interrupt_type irq_type_sn = {
+#ifndef XEN
        .name           = "SN hub",
+#endif
        .startup        = sn_startup_irq,
        .shutdown       = sn_shutdown_irq,
        .enable         = sn_enable_irq,
        .disable        = sn_disable_irq,
        .ack            = sn_ack_irq,
        .end            = sn_end_irq,
+#ifndef XEN
        .set_affinity   = sn_set_affinity_irq
+#endif
 };
 
 unsigned int sn_local_vector_to_irq(u8 vector)
@@ -221,6 +241,7 @@ void sn_irq_init(void)
        int i;
        irq_desc_t *base_desc = irq_desc;
 
+#ifndef XEN
        ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
        ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
 
@@ -229,6 +250,7 @@ void sn_irq_init(void)
                        base_desc[i].chip = &irq_type_sn;
                }
        }
+#endif
 }
 
 static void register_intr_pda(struct sn_irq_info *sn_irq_info)
@@ -251,14 +273,24 @@ static void unregister_intr_pda(struct s
        struct sn_irq_info *tmp_irq_info;
        int i, foundmatch;
 
+#ifndef XEN
        rcu_read_lock();
+#else
+       spin_lock(&sn_irq_info_lock);
+#endif
        if (pdacpu(cpu)->sn_last_irq == irq) {
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_last_irq - 1;
                     i && !foundmatch; i--) {
+#ifdef XEN
+                       list_for_each_entry(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
+#else
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
+#endif
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
@@ -272,9 +304,15 @@ static void unregister_intr_pda(struct s
                foundmatch = 0;
                for (i = pdacpu(cpu)->sn_first_irq + 1;
                     i < NR_IRQS && !foundmatch; i++) {
+#ifdef XEN
+                       list_for_each_entry(tmp_irq_info,
+                                               sn_irq_lh[i],
+                                               list) {
+#else
                        list_for_each_entry_rcu(tmp_irq_info,
                                                sn_irq_lh[i],
                                                list) {
+#endif
                                if (tmp_irq_info->irq_cpuid == cpu) {
                                        foundmatch = 1;
                                        break;
@@ -283,9 +321,14 @@ static void unregister_intr_pda(struct s
                }
                pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
        }
+#ifndef XEN
        rcu_read_unlock();
-}
-
+#else
+       spin_unlock(&sn_irq_info_lock);
+#endif
+}
+
+#ifndef XEN
 static void sn_irq_info_free(struct rcu_head *head)
 {
        struct sn_irq_info *sn_irq_info;
@@ -293,7 +336,9 @@ static void sn_irq_info_free(struct rcu_
        sn_irq_info = container_of(head, struct sn_irq_info, rcu);
        kfree(sn_irq_info);
 }
-
+#endif
+
+#ifndef XEN
 void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
 {
        nasid_t nasid = sn_irq_info->irq_nasid;
@@ -306,8 +351,14 @@ void sn_irq_fixup(struct pci_dev *pci_de
 
        /* link it into the sn_irq[irq] list */
        spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+       list_add(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#else
        list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+#endif
+#ifndef XEN
        reserve_irq_vector(sn_irq_info->irq_irq);
+#endif
        spin_unlock(&sn_irq_info_lock);
 
        register_intr_pda(sn_irq_info);
@@ -331,14 +382,21 @@ void sn_irq_unfixup(struct pci_dev *pci_
 
        unregister_intr_pda(sn_irq_info);
        spin_lock(&sn_irq_info_lock);
+#ifdef XEN
+       list_del(&sn_irq_info->list);
+#else
        list_del_rcu(&sn_irq_info->list);
+#endif
        spin_unlock(&sn_irq_info_lock);
        if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
                free_irq_vector(sn_irq_info->irq_irq);
+#ifndef XEN
        call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+#endif
        pci_dev_put(pci_dev);
 
 }
+#endif
 
 static inline void
 sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
@@ -354,16 +412,31 @@ static void force_interrupt(int irq)
 {
        struct sn_irq_info *sn_irq_info;
 
+#ifndef XEN
        if (!sn_ioif_inited)
                return;
-
+#endif
+
+#ifdef XEN
+       spin_lock(&sn_irq_info_lock);
+#else
        rcu_read_lock();
+#endif
+#ifdef XEN
+       list_for_each_entry(sn_irq_info, sn_irq_lh[irq], list)
+#else
        list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
+#endif
                sn_call_force_intr_provider(sn_irq_info);
 
+#ifdef XEN
+       spin_unlock(&sn_irq_info_lock);
+#else
        rcu_read_unlock();
-}
-
+#endif
+}
+
+#ifndef XEN
 /*
  * Check for lost interrupts.  If the PIC int_status reg. says that
  * an interrupt has been sent, but not handled, and the interrupt
@@ -408,22 +481,41 @@ static void sn_check_intr(int irq, struc
        }
        sn_irq_info->irq_last_intr = regval;
 }
+#endif
 
 void sn_lb_int_war_check(void)
 {
        struct sn_irq_info *sn_irq_info;
        int i;
 
+#ifndef XEN
+#ifdef XEN
+       if (pda->sn_first_irq == 0)
+#else
        if (!sn_ioif_inited || pda->sn_first_irq == 0)
-               return;
-
+#endif
+               return;
+
+#ifdef XEN
+       spin_lock(&sn_irq_info_lock);
+#else
        rcu_read_lock();
+#endif
        for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
+#ifdef XEN
+               list_for_each_entry(sn_irq_info, sn_irq_lh[i], list) {
+#else
                list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
+#endif
                        sn_check_intr(i, sn_irq_info);
                }
        }
+#ifdef XEN
+       spin_unlock(&sn_irq_info_lock);
+#else
        rcu_read_unlock();
+#endif
+#endif
 }
 
 void __init sn_irq_lh_init(void)
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/arch/ia64/linux-xen/sn/kernel/setup.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/setup.c Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/setup.c Wed Dec 13 17:30:18 2006 +0100
@@ -10,9 +10,13 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
+#ifndef XEN
 #include <linux/kdev_t.h>
+#endif
 #include <linux/string.h>
+#ifndef XEN
 #include <linux/screen_info.h>
+#endif
 #include <linux/console.h>
 #include <linux/timex.h>
 #include <linux/sched.h>
@@ -26,7 +30,9 @@
 #include <linux/acpi.h>
 #include <linux/compiler.h>
 #include <linux/sched.h>
+#ifndef XEN
 #include <linux/root_dev.h>
+#endif
 #include <linux/nodemask.h>
 #include <linux/pm.h>
 #include <linux/efi.h>
@@ -36,7 +42,9 @@
 #include <asm/machvec.h>
 #include <asm/system.h>
 #include <asm/processor.h>
+#ifndef XEN
 #include <asm/vga.h>
+#endif
 #include <asm/sn/arch.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/pda.h>
@@ -44,15 +52,32 @@
 #include <asm/sn/sn_cpuid.h>
 #include <asm/sn/simulator.h>
 #include <asm/sn/leds.h>
+#ifndef XEN
 #include <asm/sn/bte.h>
+#endif
 #include <asm/sn/shub_mmr.h>
+#ifndef XEN
 #include <asm/sn/clksupport.h>
+#endif
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/geo.h>
 #include <asm/sn/sn_feature_sets.h>
+#ifndef XEN
 #include "xtalk/xwidgetdev.h"
 #include "xtalk/hubdev.h"
+#else
+#include "asm/sn/xwidgetdev.h"
+#include "asm/sn/hubdev.h"
+#endif
 #include <asm/sn/klconfig.h>
+#include <asm/sn/shubio.h>
+
+#ifdef XEN
+/* Xen has no clue about NUMA ....  grrrr */
+#define pxm_to_node(foo)               0
+#define node_to_pxm(foo)               0
+#define numa_node_id()                 0
+#endif
 
 
 DEFINE_PER_CPU(struct pda_s, pda_percpu);
@@ -107,6 +132,7 @@ static void build_cnode_tables(void);
 
 static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
 
+#ifndef XEN
 /*
  * The format of "screen_info" is strange, and due to early i386-setup
  * code. This is just enough to make the console code think we're on a
@@ -122,6 +148,7 @@ struct screen_info sn_screen_info = {
        .orig_video_isVGA = 1,
        .orig_video_points = 16
 };
+#endif
 
 /*
  * This routine can only be used during init, since
@@ -228,6 +255,7 @@ static void __init sn_check_for_wars(voi
        }
 }
 
+#ifndef XEN
 /*
  * Scan the EFI PCDP table (if it exists) for an acceptable VGA console
  * output device.  If one exists, pick it and set sn_legacy_{io,mem} to
@@ -369,6 +397,7 @@ static unsigned long long ia64_sn2_print
        return (rtc_now - sn2_rtc_initial) *
                (1000000000 / sn_rtc_cycles_per_second);
 }
+#endif
 
 /**
  * sn_setup - SN platform setup routine
@@ -378,12 +407,14 @@ static unsigned long long ia64_sn2_print
  * the RTC frequency (via a SAL call), initializing secondary CPUs, and
  * setting up per-node data areas.  The console is also initialized here.
  */
+void __cpuinit sn_cpu_init(void);
+
 void __init sn_setup(char **cmdline_p)
 {
        long status, ticks_per_sec, drift;
        u32 version = sn_sal_rev();
-       extern void sn_cpu_init(void);
-
+
+#ifndef XEN
        sn2_rtc_initial = rtc_time();
        ia64_sn_plat_set_error_handling_features();     // obsolete
        ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
@@ -430,6 +461,7 @@ void __init sn_setup(char **cmdline_p)
 #endif                         /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
 
        MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
+#endif
 
        /*
         * Build the tables for managing cnodes.
@@ -446,10 +478,12 @@ void __init sn_setup(char **cmdline_p)
                sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
        } else
                sn_rtc_cycles_per_second = ticks_per_sec;
+#ifndef XEN
 
        platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
 
        ia64_printk_clock = ia64_sn2_printk_clock;
+#endif
 
        printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);
 
@@ -457,21 +491,27 @@ void __init sn_setup(char **cmdline_p)
         * we set the default root device to /dev/hda
         * to make simulation easy
         */
+#ifndef XEN
        ROOT_DEV = Root_HDA1;
+#endif
 
        /*
         * Create the PDAs and NODEPDAs for all the cpus.
         */
        sn_init_pdas(cmdline_p);
 
+#ifndef XEN
        ia64_mark_idle = &snidle;
+#endif
 
        /*
         * For the bootcpu, we do this here. All other cpus will make the
         * call as part of cpu_init in slave cpu initialization.
         */
+
        sn_cpu_init();
 
+#ifndef XEN
 #ifdef CONFIG_SMP
        init_smp_config();
 #endif
@@ -487,6 +527,7 @@ void __init sn_setup(char **cmdline_p)
         */
        pm_power_off = ia64_sn_power_down;
        current->thread.flags |= IA64_THREAD_MIGRATION;
+#endif
 }
 
 /**
@@ -526,6 +567,7 @@ static void __init sn_init_pdas(char **c
                memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
                       sizeof(nodepdaindr));
 
+#ifndef XEN
        /*
         * Set up IO related platform-dependent nodepda fields.
         * The following routine actually sets up the hubinfo struct
@@ -542,6 +584,7 @@ static void __init sn_init_pdas(char **c
        for (cnode = 0; cnode < num_cnodes; cnode++) {
                hubdev_init_node(nodepdaindr[cnode], cnode);
        }
+#endif
 }
 
 /**
@@ -565,6 +608,7 @@ void __cpuinit sn_cpu_init(void)
        static int wars_have_been_checked;
 
        cpuid = smp_processor_id();
+#ifndef XEN
        if (cpuid == 0 && IS_MEDUSA()) {
                if (ia64_sn_is_fake_prom())
                        sn_prom_type = 2;
@@ -573,6 +617,7 @@ void __cpuinit sn_cpu_init(void)
                printk(KERN_INFO "Running on medusa with %s PROM\n",
                       (sn_prom_type == 1) ? "real" : "fake");
        }
+#endif
 
        memset(pda, 0, sizeof(pda));
        if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
@@ -660,6 +705,8 @@ void __cpuinit sn_cpu_init(void)
                pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
        }
 
+#ifndef XEN
+/* local_node_data is not allocated .... yet
        /*
         * WAR addresses for SHUB 1.x.
         */
@@ -672,6 +719,7 @@ void __cpuinit sn_cpu_init(void)
                    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
                                                              SH1_PI_CAM_CONTROL);
        }
+#endif
 }
 
 /*
@@ -752,4 +800,3 @@ int sn_prom_feature_available(int id)
        return test_bit(id, sn_prom_features);
 }
 EXPORT_SYMBOL(sn_prom_feature_available);
-
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c       Wed Dec 13 17:30:18 2006 +0100
@@ -34,6 +34,7 @@
 #include <asm/numa.h>
 #include <asm/hw_irq.h>
 #include <asm/current.h>
+#include <asm/sn/arch.h>
 #include <asm/sn/sn_cpuid.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
@@ -63,7 +64,11 @@ sn2_ptc_deadlock_recovery(short *, short
 #define local_node_uses_ptc_ga(sh1)    ((sh1) ? 1 : 0)
 #define max_active_pio(sh1)            ((sh1) ? 32 : 7)
 #define reset_max_active_on_deadlock() 1
+#ifndef XEN
 #define PTC_LOCK(sh1)                  ((sh1) ? &sn2_global_ptc_lock : &sn_nodepda->ptc_lock)
+#else
+#define PTC_LOCK(sh1)                  &sn2_global_ptc_lock
+#endif
 
 struct ptc_stats {
        unsigned long ptc_l;
@@ -93,6 +98,11 @@ static inline unsigned long wait_piowc(v
        return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0;
 }
 
+#if 0
+/*
+ * No idea if Xen will ever support this
+ */
+
 /**
  * sn_migrate - SN-specific task migration actions
  * @task: Task being migrated to new CPU
@@ -117,9 +127,10 @@ void sn_tlb_migrate_finish(struct mm_str
 void sn_tlb_migrate_finish(struct mm_struct *mm)
 {
        /* flush_tlb_mm is inefficient if more than 1 users of mm */
-       if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1)
+       if (mm == &current->arch.mm && mm && atomic_read(&mm->mm_users) == 1)
                flush_tlb_mm(mm);
 }
+#endif
 
 /**
  * sn2_global_tlb_purge - globally purge translation cache of virtual address range
@@ -143,12 +154,16 @@ void sn_tlb_migrate_finish(struct mm_str
  *       done with ptc.g/MMRs under protection of the global ptc_lock.
  */
 
+/* Xen is soooooooo stupid! */
+static cpumask_t mask_all = CPU_MASK_ALL;
+
 void
-sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
+sn2_global_tlb_purge(/* struct mm_struct *mm, */ unsigned long start,
                     unsigned long end, unsigned long nbits)
 {
        int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
-       int mymm = (mm == current->active_mm && mm == current->mm);
+       struct mm_struct *mm;
+       int mymm = (1 /* mm == &current->domain->arch.active_mm */ && mm == &current->domain->arch.mm);
        int use_cpu_ptcga;
        volatile unsigned long *ptc0, *ptc1;
        unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
@@ -159,12 +174,21 @@ sn2_global_tlb_purge(struct mm_struct *m
        nodes_clear(nodes_flushed);
        i = 0;
 
+#if 0  /* One day Xen will grow up! */
        for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
                cnode = cpu_to_node(cpu);
                node_set(cnode, nodes_flushed);
                lcpu = cpu;
                i++;
        }
+#else
+       for_each_cpu(cpu) {
+               cnode = cpu_to_node(cpu);
+               node_set(cnode, nodes_flushed);
+               lcpu = cpu;
+               i++;
+       }
+#endif
 
        if (i == 0)
                return;
@@ -182,19 +206,25 @@ sn2_global_tlb_purge(struct mm_struct *m
                return;
        }
 
+#if 0
        if (atomic_read(&mm->mm_users) == 1 && mymm) {
+#if 0  /* I hate Xen! */
                flush_tlb_mm(mm);
+#else
+               flush_tlb_mask(mask_all);
+#endif
                __get_cpu_var(ptcstats).change_rid++;
                preempt_enable();
                return;
        }
+#endif
 
        itc = ia64_get_itc();
        nix = 0;
        for_each_node_mask(cnode, nodes_flushed)
                nasids[nix++] = cnodeid_to_nasid(cnode);
 
-       rr_value = (mm->context << 3) | REGION_NUMBER(start);
+       rr_value = /* (mm->context << 3) | */ REGION_NUMBER(start);
 
        shub1 = is_shub1();
        if (shub1) {
@@ -202,13 +232,13 @@ sn2_global_tlb_purge(struct mm_struct *m
                        (nbits << SH1_PTC_0_PS_SHFT) |
                        (rr_value << SH1_PTC_0_RID_SHFT) |
                        (1UL << SH1_PTC_0_START_SHFT);
-               ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
-               ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
+               ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0);
+               ptc1 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1);
        } else {
                data0 = (1UL << SH2_PTC_A_SHFT) |
                        (nbits << SH2_PTC_PS_SHFT) |
                        (1UL << SH2_PTC_START_SHFT);
-               ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + 
+               ptc0 = (unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + 
                        (rr_value << SH2_PTC_RID_SHFT));
                ptc1 = NULL;
        }
@@ -357,7 +387,6 @@ void sn_send_IPI_phys(int nasid, long ph
                wait_piowc();
                spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
        }
-
 }
 
 EXPORT_SYMBOL(sn_send_IPI_phys);
@@ -385,12 +414,11 @@ void sn2_send_IPI(int cpuid, int vector,
 
        physid = cpu_physical_id(cpuid);
        nasid = cpuid_to_nasid(cpuid);
-
        /* the following is used only when starting cpus at boot time */
        if (unlikely(nasid == -1))
                ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL);
 
-       sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
+       sn_send_IPI_phys(nasid, physid, vector, delivery_mode);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -486,4 +514,3 @@ module_init(sn2_ptc_init);
 module_init(sn2_ptc_init);
 module_exit(sn2_ptc_exit);
 #endif /* CONFIG_PROC_FS */
-
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/arch/ia64/xen/irq.c
--- a/xen/arch/ia64/xen/irq.c   Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/arch/ia64/xen/irq.c   Wed Dec 13 17:30:18 2006 +0100
@@ -47,6 +47,13 @@
 
 #include <xen/event.h>
 #define apicid_to_phys_cpu_present(x)  1
+
+#ifdef CONFIG_IA64_GENERIC
+unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
+{
+       return (unsigned int) vec;
+}
+#endif
 
 /*
  * Linux has a controller-independent x86 interrupt architecture.
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/config.h
--- a/xen/include/asm-ia64/config.h     Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/config.h     Wed Dec 13 17:30:18 2006 +0100
@@ -8,8 +8,9 @@
 // manufactured from component pieces
 
 // defined in linux/arch/ia64/defconfig
-//#define      CONFIG_IA64_GENERIC
-#define        CONFIG_IA64_HP_SIM
+#define        CONFIG_IA64_GENERIC
+#define CONFIG_HZ      32
+
 #define        CONFIG_IA64_L1_CACHE_SHIFT 7
 // needed by include/asm-ia64/page.h
 #define        CONFIG_IA64_PAGE_SIZE_16KB      // 4KB doesn't work?!?
@@ -145,14 +146,6 @@ extern int smp_num_siblings;
 // avoid redefining task_struct in asm/current.h
 #define task_struct vcpu
 
-// linux/include/asm-ia64/machvec.h (linux/arch/ia64/lib/io.c)
-#define platform_inb   __ia64_inb
-#define platform_inw   __ia64_inw
-#define platform_inl   __ia64_inl
-#define platform_outb  __ia64_outb
-#define platform_outw  __ia64_outw
-#define platform_outl  __ia64_outl
-
 #include <xen/cache.h>
 #ifndef CONFIG_SMP
 #define __cacheline_aligned_in_smp
@@ -206,6 +199,16 @@ void sort_main_extable(void);
 // Deprivated linux inf and put here for short time compatibility
 #define kmalloc(s, t) xmalloc_bytes((s))
 #define kfree(s) xfree((s))
+#define kzalloc(size, flags)                           \
+({                                                     \
+       unsigned char *mem;                             \
+       mem = (unsigned char *)xmalloc_bytes(size);     \
+       if (mem)                                        \
+               memset(mem, 0, size);                   \
+       (void *)mem;                                    \
+})
+#define kcalloc(n, size, flags)                kzalloc(n * size, flags)
+#define alloc_bootmem_node(pg, size)   xmalloc_bytes(size)
 
 // see common/keyhandler.c
 #define        nop()   asm volatile ("nop 0")
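For reference, a minimal user-space sketch of the behaviour the kzalloc()/
kcalloc() compatibility macros above are meant to provide: allocate, zero the
buffer, and hand it back, with kcalloc() multiplying count by element size
first. malloc() stands in for xmalloc_bytes() and the flags argument is
ignored, just as the macro above ignores it; this is an illustration, not the
hypervisor code itself.

/* Stand-alone sketch of the kzalloc()/kcalloc() semantics added to
 * config.h above.  malloc() replaces xmalloc_bytes(); flags is unused.
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define kzalloc(size, flags)                            \
({                                                      \
        unsigned char *mem = malloc(size);              \
        if (mem)                                        \
                memset(mem, 0, (size));                 \
        (void *)mem;                                    \
})

#define kcalloc(n, size, flags) kzalloc((n) * (size), flags)

int main(void)
{
        int *vals = kcalloc(4, sizeof(*vals), 0);       /* four zeroed ints */

        if (!vals)
                return 1;
        printf("%d %d %d %d\n", vals[0], vals[1], vals[2], vals[3]);
        free(vals);
        return 0;
}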
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/machvec_dig.h
--- a/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h  Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h  Wed Dec 13 17:30:18 2006 +0100
@@ -11,6 +11,32 @@ extern ia64_mv_setup_t dig_setup;
  * the macros are used directly.
  */
 #define platform_name          "dig"
+#ifdef XEN
+/*
+ * All the World is a PC .... yay! yay! yay!
+ */
+extern ia64_mv_setup_t hpsim_setup;
+#define platform_setup                         hpsim_setup
+
+#define platform_dma_init                      machvec_noop
+#define platform_dma_alloc_coherent            machvec_noop
+#define platform_dma_free_coherent             machvec_noop
+#define platform_dma_map_single                        machvec_noop
+#define platform_dma_unmap_single              machvec_noop
+#define platform_dma_map_sg                    machvec_noop
+#define platform_dma_unmap_sg                  machvec_noop
+#define platform_dma_sync_single_for_cpu       machvec_noop
+#define platform_dma_sync_sg_for_cpu           machvec_noop
+#define platform_dma_sync_single_for_device    machvec_noop
+#define platform_dma_sync_sg_for_device                machvec_noop
+#define platform_dma_mapping_error             machvec_noop
+#define platform_dma_supported                 machvec_noop
+
+#define platform_pci_get_legacy_mem            machvec_noop
+#define platform_pci_legacy_read               machvec_noop
+#define platform_pci_legacy_write              machvec_noop
+#else
 #define platform_setup         dig_setup
+#endif
 
 #endif /* _ASM_IA64_MACHVEC_DIG_h */
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h
--- a/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h        Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h        Wed Dec 13 17:30:18 2006 +0100
@@ -19,6 +19,30 @@ extern ia64_mv_dma_mapping_error     sba_dma
  * the macros are used directly.
  */
 #define platform_name                          "hpzx1"
+#ifdef XEN
+extern ia64_mv_setup_t hpsim_setup;
+extern ia64_mv_irq_init_t hpsim_irq_init;
+#define platform_setup                         hpsim_setup
+#define platform_irq_init                      hpsim_irq_init
+
+#define platform_dma_init                      machvec_noop
+#define platform_dma_alloc_coherent            machvec_noop
+#define platform_dma_free_coherent             machvec_noop
+#define platform_dma_map_single                        machvec_noop
+#define platform_dma_unmap_single              machvec_noop
+#define platform_dma_map_sg                    machvec_noop
+#define platform_dma_unmap_sg                  machvec_noop
+#define platform_dma_sync_single_for_cpu       machvec_noop
+#define platform_dma_sync_sg_for_cpu           machvec_noop
+#define platform_dma_sync_single_for_device    machvec_noop
+#define platform_dma_sync_sg_for_device                machvec_noop
+#define platform_dma_mapping_error             machvec_noop
+#define platform_dma_supported                 machvec_noop
+
+#define platform_pci_get_legacy_mem            machvec_noop
+#define platform_pci_legacy_read               machvec_noop
+#define platform_pci_legacy_write              machvec_noop
+#else
 #define platform_setup                         dig_setup
 #define platform_dma_init                      machvec_noop
 #define platform_dma_alloc_coherent            sba_alloc_coherent
@@ -33,5 +57,6 @@ extern ia64_mv_dma_mapping_error      sba_dma
 #define platform_dma_sync_sg_for_device                machvec_dma_sync_sg
 #define platform_dma_supported                 sba_dma_supported
 #define platform_dma_mapping_error             sba_dma_mapping_error
+#endif
 
 #endif /* _ASM_IA64_MACHVEC_HPZX1_h */
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h
--- a/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h  Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h  Wed Dec 13 17:30:18 2006 +0100
@@ -66,9 +66,11 @@ extern ia64_mv_dma_sync_sg_for_device        sn
 extern ia64_mv_dma_sync_sg_for_device  sn_dma_sync_sg_for_device;
 extern ia64_mv_dma_mapping_error       sn_dma_mapping_error;
 extern ia64_mv_dma_supported           sn_dma_supported;
+#ifndef XEN
 extern ia64_mv_migrate_t               sn_migrate;
 extern ia64_mv_setup_msi_irq_t         sn_setup_msi_irq;
 extern ia64_mv_teardown_msi_irq_t      sn_teardown_msi_irq;
+#endif
 
 
 /*
@@ -83,9 +85,13 @@ extern ia64_mv_teardown_msi_irq_t    sn_tea
 #define platform_cpu_init              sn_cpu_init
 #define platform_irq_init              sn_irq_init
 #define platform_send_ipi              sn2_send_IPI
+#ifndef XEN
 #define platform_timer_interrupt       sn_timer_interrupt
+#endif
 #define platform_global_tlb_purge       sn2_global_tlb_purge
+#ifndef XEN
 #define platform_tlb_migrate_finish    sn_tlb_migrate_finish
+#endif
 #define platform_pci_fixup             sn_pci_fixup
 #define platform_inb                   __sn_inb
 #define platform_inw                   __sn_inw
@@ -103,10 +109,30 @@ extern ia64_mv_teardown_msi_irq_t sn_tea
 #define platform_readl_relaxed         __sn_readl_relaxed
 #define platform_readq_relaxed         __sn_readq_relaxed
 #define platform_local_vector_to_irq   sn_local_vector_to_irq
+#ifdef XEN
+#define platform_pci_get_legacy_mem    machvec_noop
+#define platform_pci_legacy_read       machvec_noop
+#define platform_pci_legacy_write      machvec_noop
+#else
 #define platform_pci_get_legacy_mem    sn_pci_get_legacy_mem
 #define platform_pci_legacy_read       sn_pci_legacy_read
 #define platform_pci_legacy_write      sn_pci_legacy_write
+#endif
 #define platform_dma_init              machvec_noop
+#ifdef XEN
+#define platform_dma_alloc_coherent    machvec_noop
+#define platform_dma_free_coherent     machvec_noop
+#define platform_dma_map_single                machvec_noop
+#define platform_dma_unmap_single      machvec_noop
+#define platform_dma_map_sg            machvec_noop
+#define platform_dma_unmap_sg          machvec_noop
+#define platform_dma_sync_single_for_cpu machvec_noop
+#define platform_dma_sync_sg_for_cpu   machvec_noop
+#define platform_dma_sync_single_for_device machvec_noop
+#define platform_dma_sync_sg_for_device        machvec_noop
+#define platform_dma_mapping_error     machvec_noop
+#define platform_dma_supported         machvec_noop
+#else
 #define platform_dma_alloc_coherent    sn_dma_alloc_coherent
 #define platform_dma_free_coherent     sn_dma_free_coherent
 #define platform_dma_map_single                sn_dma_map_single
@@ -120,6 +146,9 @@ extern ia64_mv_teardown_msi_irq_t   sn_tea
 #define platform_dma_mapping_error             sn_dma_mapping_error
 #define platform_dma_supported         sn_dma_supported
 #define platform_migrate               sn_migrate
+#endif
+
+#ifndef XEN
 #ifdef CONFIG_PCI_MSI
 #define platform_setup_msi_irq         sn_setup_msi_irq
 #define platform_teardown_msi_irq      sn_teardown_msi_irq
@@ -127,6 +156,7 @@ extern ia64_mv_teardown_msi_irq_t   sn_tea
 #define platform_setup_msi_irq         ((ia64_mv_setup_msi_irq_t*)NULL)
 #define platform_teardown_msi_irq      ((ia64_mv_teardown_msi_irq_t*)NULL)
 #endif
+#endif
 
 #include <asm/sn/io.h>
 
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/page.h
--- a/xen/include/asm-ia64/linux-xen/asm/page.h Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/page.h Wed Dec 13 17:30:18 2006 +0100
@@ -11,6 +11,10 @@
 
 #include <asm/intrinsics.h>
 #include <asm/types.h>
+
+#define RGN_SHIFT      61
+#define RGN_BASE(r)    (r << RGN_SHIFT)
+#define RGN_BITS       RGN_BASE(-1)
 
 /*
  * PAGE_SHIFT determines the actual kernel page size.
@@ -36,10 +40,10 @@
 
 #define RGN_MAP_LIMIT  ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE)      /* per region addr limit */
 
+# define RGN_HPAGE             (4UL)   /* note: this is hardcoded in reload_context()!*/
 #ifdef CONFIG_HUGETLB_PAGE
-# define REGION_HPAGE          (4UL)   /* note: this is hardcoded in reload_context()!*/
 # define REGION_SHIFT          61
-# define HPAGE_REGION_BASE     (REGION_HPAGE << REGION_SHIFT)
+# define HPAGE_REGION_BASE     (RGN_HPAGE << REGION_SHIFT)
 # define HPAGE_SHIFT           hpage_shift
 # define HPAGE_SHIFT_DEFAULT   28      /* check ia64 SDM for architecture supported size */
 # define HPAGE_SIZE            (__IA64_UL_CONST(1) << HPAGE_SHIFT)
@@ -141,8 +145,8 @@ typedef union ia64_va {
                                 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER    (HPAGE_SHIFT - PAGE_SHIFT)
 # define is_hugepage_only_range(mm, addr, len)         \
-        (REGION_NUMBER(addr) == REGION_HPAGE &&        \
-         REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE)
+        (REGION_NUMBER(addr) == RGN_HPAGE &&   \
+         REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
 extern unsigned int hpage_shift;
 #endif
 
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/pci.h
--- a/xen/include/asm-ia64/linux-xen/asm/pci.h  Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/pci.h  Wed Dec 13 17:30:18 2006 +0100
@@ -6,9 +6,12 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/ioport.h>
 
 #include <asm/io.h>
+#ifndef XEN
 #include <asm/scatterlist.h>
+#endif
 
 /*
  * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
@@ -55,7 +58,9 @@ pcibios_penalize_isa_irq (int irq, int a
 #define HAVE_ARCH_PCI_MWI 1
 extern int pcibios_prep_mwi (struct pci_dev *);
 
+#ifndef XEN
 #include <asm-generic/pci-dma-compat.h>
+#endif
 
 /* pci_unmap_{single,page} is not a nop, thus... */
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
@@ -107,6 +112,7 @@ extern int pci_mmap_page_range (struct p
 #define HAVE_PCI_LEGACY
 extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
                                      struct vm_area_struct *vma);
+#ifndef XEN
 extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
                                  size_t count);
 extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
@@ -114,6 +120,7 @@ extern int pci_mmap_legacy_mem(struct ko
 extern int pci_mmap_legacy_mem(struct kobject *kobj,
                               struct bin_attribute *attr,
                               struct vm_area_struct *vma);
+#endif
 
 #define pci_get_legacy_mem platform_pci_get_legacy_mem
 #define pci_legacy_read platform_pci_legacy_read
@@ -156,19 +163,6 @@ extern void pcibios_bus_to_resource(stru
 extern void pcibios_bus_to_resource(struct pci_dev *dev,
                struct resource *res, struct pci_bus_region *region);
 
-static inline struct resource *
-pcibios_select_root(struct pci_dev *pdev, struct resource *res)
-{
-       struct resource *root = NULL;
-
-       if (res->flags & IORESOURCE_IO)
-               root = &ioport_resource;
-       if (res->flags & IORESOURCE_MEM)
-               root = &iomem_resource;
-
-       return root;
-}
-
 #define pcibios_scan_all_fns(a, b)     0
 
 #endif /* _ASM_IA64_PCI_H */
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/sn/addrs.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h     Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h     Wed Dec 13 17:30:18 2006 +0100
@@ -136,13 +136,9 @@
  */
 #define TO_PHYS(x)             (TO_PHYS_MASK & (x))
 #define TO_CAC(x)              (CAC_BASE     | TO_PHYS(x))
-#ifdef CONFIG_SGI_SN
 #define TO_AMO(x)              (AMO_BASE     | TO_PHYS(x))
 #define TO_GET(x)              (GET_BASE     | TO_PHYS(x))
-#else
-#define TO_AMO(x)              ({ BUG(); x; })
-#define TO_GET(x)              ({ BUG(); x; })
-#endif
+
 
 /*
  * Covert from processor physical address to II/TIO physical address:
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/sn/arch.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/arch.h      Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/arch.h      Wed Dec 13 17:30:18 2006 +0100
@@ -5,16 +5,11 @@
  *
  * SGI specific setup.
  *
- * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc.  All rights reserved.
- * Copyright (C) 1999 Ralf Baechle (ralf@xxxxxxx)
+ * Copyright (C) 2006 Silicon Graphics, Inc.  All rights reserved.
  */
 #ifndef _ASM_IA64_SN_ARCH_H
 #define _ASM_IA64_SN_ARCH_H
 
-#include <linux/numa.h>
-#include <asm/types.h>
-#include <asm/percpu.h>
-#include <asm/sn/types.h>
 #include <asm/sn/sn_cpuid.h>
 
 /*
@@ -57,15 +52,15 @@ struct sn_hub_info_s {
        u16 nasid_bitmask;
 };
 DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
-#define sn_hub_info    (&__get_cpu_var(__sn_hub_info))
-#define is_shub2()     (sn_hub_info->shub2)
-#define is_shub1()     (sn_hub_info->shub2 == 0)
+#define sn_hub_info    (&__get_cpu_var(__sn_hub_info))
+
+#define is_shub1()     1
+#define is_shub2()     0
 
 /*
  * Use this macro to test if shub 1.1 wars should be enabled
  */
 #define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found)
-
 
 /*
  * Compact node ID to nasid mappings kept in the per-cpu data areas of each
@@ -75,11 +70,4 @@ DECLARE_PER_CPU(short, __sn_cnodeid_to_n
 #define sn_cnodeid_to_nasid    (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
 
 
-extern u8 sn_partition_id;
-extern u8 sn_system_size;
-extern u8 sn_sharing_domain_size;
-extern u8 sn_region_size;
-
-extern void sn_flush_all_caches(long addr, long bytes);
-
 #endif /* _ASM_IA64_SN_ARCH_H */
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h    Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h    Wed Dec 13 17:30:18 2006 +0100
@@ -8,7 +8,7 @@
 #ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
 #define _ASM_IA64_SN_XTALK_HUBDEV_H
 
-#include "xtalk/xwidgetdev.h"
+#include "asm/sn/xwidgetdev.h"
 
 #define HUB_WIDGET_ID_MAX 0xf
 #define DEV_PER_WIDGET (2*2*8)
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/sn/io.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/io.h        Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/io.h        Wed Dec 13 17:30:18 2006 +0100
@@ -18,7 +18,14 @@ extern int num_cnodes;
 
 #define __sn_mf_a()   ia64_mfa()
 
+#ifdef XEN
+/*
+ * Xen doesn't deal with any PIC devices directly, it's all handled in dom0
+ */
+#define sn_dma_flush(foo)              do{}while(0)
+#else
 extern void sn_dma_flush(unsigned long);
+#endif
 
 #define __sn_inb ___sn_inb
 #define __sn_inw ___sn_inw
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h    Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h    Wed Dec 13 17:30:18 2006 +0100
@@ -8,6 +8,8 @@
 #ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
 #define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
 
+#include <linux/spinlock.h>
+#include <linux/pci.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
 
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h    Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h    Wed Dec 13 17:30:18 2006 +0100
@@ -23,6 +23,6 @@
 
 extern long pio_phys_read_mmr(volatile long *mmr); 
 extern void pio_phys_write_mmr(volatile long *mmr, long val);
-extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2);
+extern void pio_atomic_phys_write_mmrs(volatile unsigned long *mmr1, long val1, volatile unsigned long *mmr2, long val2);
 
 #endif /* _ASM_IA64_SN_RW_MMR_H */
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h
--- a/xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h    Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/sn/sn_sal.h    Wed Dec 13 17:30:18 2006 +0100
@@ -18,6 +18,7 @@
 #include <asm/sn/arch.h>
 #include <asm/sn/geo.h>
 #include <asm/sn/nodepda.h>
+#include <asm/sn/shub_mmr.h>
 
 // SGI Specific Calls
 #define  SN_SAL_POD_MODE                           0x02000001
@@ -36,6 +37,7 @@
 #define  SN_SAL_GET_FIT_COMPT                     0x0200001b   // reentrant
 #define  SN_SAL_GET_HUB_INFO                       0x0200001c
 #define  SN_SAL_GET_SAPIC_INFO                     0x0200001d
+#define  SN_SAL_GET_SN_INFO                        0x0200001e
 #define  SN_SAL_CONSOLE_PUTC                       0x02000021
 #define  SN_SAL_CONSOLE_GETC                       0x02000022
 #define  SN_SAL_CONSOLE_PUTS                       0x02000023
@@ -73,8 +75,17 @@
 #define  SN_SAL_IOIF_GET_PCIBUS_INFO              0x02000056
 #define  SN_SAL_IOIF_GET_PCIDEV_INFO              0x02000057
 #define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST     0x02000058
+#define  SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST     0x0200005a
 
 #define SN_SAL_HUB_ERROR_INTERRUPT                0x02000060
+#define SN_SAL_BTE_RECOVER                        0x02000061
+#define SN_SAL_RESERVED_DO_NOT_USE                0x02000062
+#define SN_SAL_IOIF_GET_PCI_TOPOLOGY              0x02000064
+
+#define  SN_SAL_GET_PROM_FEATURE_SET              0x02000065
+#define  SN_SAL_SET_OS_FEATURE_SET                0x02000066
+#define  SN_SAL_INJECT_ERROR                      0x02000067
+#define  SN_SAL_SET_CPU_NUMBER                    0x02000068
 
 
 /*
@@ -122,44 +133,24 @@
 #define SALRET_INVALID_ARG     (-2)
 #define SALRET_ERROR           (-3)
 
-
-#ifndef XEN
+#define SN_SAL_FAKE_PROM                          0x02009999
+
 /**
- * sn_sal_rev_major - get the major SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the major value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_major(void)
-{
-       struct ia64_sal_systab *systab = efi.sal_systab;
-
-       return (int)systab->sal_b_rev_major;
-}
-
-/**
- * sn_sal_rev_minor - get the minor SGI SAL revision number
- *
- * The SGI PROM stores its version in sal_[ab]_rev_(major|minor).
- * This routine simply extracts the minor value from the
- * @ia64_sal_systab structure constructed by ia64_sal_init().
- */
-static inline int
-sn_sal_rev_minor(void)
-{
-       struct ia64_sal_systab *systab = efi.sal_systab;
-       
-       return (int)systab->sal_b_rev_minor;
-}
-
-/*
- * Specify the minimum PROM revsion required for this kernel.
- * Note that they're stored in hex format...
- */
-#define SN_SAL_MIN_MAJOR       0x4  /* SN2 kernels need at least PROM 4.0 */
-#define SN_SAL_MIN_MINOR       0x0
+  * sn_sal_revision - get the SGI SAL revision number
+  *
+  * The SGI PROM stores its version in the sal_[ab]_rev_(major|minor).
+  * This routine simply extracts the major and minor values and
+  * presents them in a u32 format.
+  *
+  * For example, version 4.05 would be represented at 0x0405.
+  */
+static inline u32
+sn_sal_rev(void)
+{
+       struct ia64_sal_systab *systab = __va(efi.sal_systab);
+
+       return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor);
+}
 
 /*
  * Returns the master console nasid, if the call fails, return an illegal
@@ -227,7 +218,6 @@ ia64_sn_get_klconfig_addr(nasid_t nasid)
        }
        return ret_stuff.v0 ? __va(ret_stuff.v0) : NULL;
 }
-#endif /* !XEN */
 
 /*
  * Returns the next console character.
@@ -571,6 +561,7 @@ sn_partition_serial_number_val(void) {
                return(sn_partition_serial_number = ia64_sn_partition_serial_get());
        }
 }
+#endif
 
 /*
  * Returns the partition id of the nasid passed in as an argument,
@@ -583,10 +574,11 @@ ia64_sn_sysctl_partition_get(nasid_t nas
        SAL_CALL(ret_stuff, SN_SAL_SYSCTL_PARTITION_GET, nasid,
                 0, 0, 0, 0, 0, 0);
        if (ret_stuff.status != 0)
-           return INVALID_PARTID;
+           return -1;
        return ((partid_t)ret_stuff.v0);
 }
 
+#ifndef XEN
 /*
  * Returns the partition id of the current processor.
  */
@@ -894,6 +886,7 @@ ia64_sn_irtr_init(nasid_t nasid, void *b
                           (u64) nasid, (u64) buf, (u64) len, 0, 0, 0);
        return (int) rv.status;
 }
+#endif
 
 /*
  * Returns the nasid, subnode & slice corresponding to a SAPIC ID
@@ -932,6 +925,68 @@ ia64_sn_get_sapic_info(int sapicid, int 
        if (nasid) *nasid = (int) ret_stuff.v0;
        if (subnode) *subnode = (int) ret_stuff.v1;
        if (slice) *slice = (int) ret_stuff.v2;
+       return 0;
+}
+ 
+/*
+ * Returns information about the HUB/SHUB.
+ *  In:
+ *     arg0 - SN_SAL_GET_SN_INFO
+ *     arg1 - 0 (other values reserved for future use)
+ *  Out:
+ *     v0 
+ *             [7:0]   - shub type (0=shub1, 1=shub2)
+ *             [15:8]  - Log2 max number of nodes in entire system (includes
+ *                       C-bricks, I-bricks, etc)
+ *             [23:16] - Log2 of nodes per sharing domain
+ *             [31:24] - partition ID
+ *             [39:32] - coherency_id
+ *             [47:40] - regionsize
+ *     v1 
+ *             [15:0]  - nasid mask (ex., 0x7ff for 11 bit nasid)
+ *             [23:15] - bit position of low nasid bit
+ */
+static inline u64
+ia64_sn_get_sn_info(int fc, u8 *shubtype, u16 *nasid_bitmask, u8 *nasid_shift, 
+               u8 *systemsize, u8 *sharing_domain_size, u8 *partid, u8 *coher, u8 *reg)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ret_stuff.status = 0;
+       ret_stuff.v0 = 0;
+       ret_stuff.v1 = 0;
+       ret_stuff.v2 = 0;
+       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_GET_SN_INFO, fc, 0, 0, 0, 0, 0, 0);
+
+/***** BEGIN HACK - temp til old proms no longer supported ********/
+       if (ret_stuff.status == SALRET_NOT_IMPLEMENTED) {
+               int nasid = get_sapicid() & 0xfff;
+#define SH_SHUB_ID_NODES_PER_BIT_MASK 0x001f000000000000UL
+#define SH_SHUB_ID_NODES_PER_BIT_SHFT 48
+               if (shubtype) *shubtype = 0;
+               if (nasid_bitmask) *nasid_bitmask = 0x7ff;
+               if (nasid_shift) *nasid_shift = 38;
+               if (systemsize) *systemsize = 10;
+               if (sharing_domain_size) *sharing_domain_size = 8;
+               if (partid) *partid = ia64_sn_sysctl_partition_get(nasid);
+               if (coher) *coher = nasid >> 9;
+               if (reg) *reg = (HUB_L((u64 *) LOCAL_MMR_ADDR(SH1_SHUB_ID)) & SH_SHUB_ID_NODES_PER_BIT_MASK) >>
+                       SH_SHUB_ID_NODES_PER_BIT_SHFT;
+               return 0;
+       }
+/***** END HACK *******/
+
+       if (ret_stuff.status < 0)
+               return ret_stuff.status;
+
+       if (shubtype) *shubtype = ret_stuff.v0 & 0xff;
+       if (systemsize) *systemsize = (ret_stuff.v0 >> 8) & 0xff;
+       if (sharing_domain_size) *sharing_domain_size = (ret_stuff.v0 >> 16) & 0xff;
+       if (partid) *partid = (ret_stuff.v0 >> 24) & 0xff;
+       if (coher) *coher = (ret_stuff.v0 >> 32) & 0xff;
+       if (reg) *reg = (ret_stuff.v0 >> 40) & 0xff;
+       if (nasid_bitmask) *nasid_bitmask = (ret_stuff.v1 & 0xffff);
+       if (nasid_shift) *nasid_shift = (ret_stuff.v1 >> 16) & 0xff;
        return 0;
 }
  
@@ -974,6 +1029,7 @@ ia64_sn_get_hub_info(int fc, u64 *arg1, 
        return 0;
 }
  
+#ifndef XEN
 /*
  * This is the access point to the Altix PROM hardware performance
  * and status monitoring interface. For info on using this, see
@@ -991,4 +1047,75 @@ ia64_sn_hwperf_op(nasid_t nasid, u64 opc
        return (int) rv.status;
 }
 #endif /* !XEN */
+
+static inline int
+ia64_sn_ioif_get_pci_topology(u64 buf, u64 len)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_NOLOCK(rv, SN_SAL_IOIF_GET_PCI_TOPOLOGY, buf, len, 0, 0, 0, 0, 0);
+       return (int) rv.status;
+}
+
+/*
+ * BTE error recovery is implemented in SAL
+ */
+static inline int
+ia64_sn_bte_recovery(nasid_t nasid)
+{
+       struct ia64_sal_retval rv;
+
+       rv.status = 0;
+       SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
+       if (rv.status == SALRET_NOT_IMPLEMENTED)
+               return 0;
+       return (int) rv.status;
+}
+
+static inline int
+ia64_sn_is_fake_prom(void)
+{
+       struct ia64_sal_retval rv;
+       SAL_CALL_NOLOCK(rv, SN_SAL_FAKE_PROM, 0, 0, 0, 0, 0, 0, 0);
+       return (rv.status == 0);
+}
+
+static inline int
+ia64_sn_get_prom_feature_set(int set, unsigned long *feature_set)
+{
+       struct ia64_sal_retval rv;
+
+       SAL_CALL_NOLOCK(rv, SN_SAL_GET_PROM_FEATURE_SET, set, 0, 0, 0, 0, 0, 0);
+       if (rv.status != 0)
+               return rv.status;
+       *feature_set = rv.v0;
+       return 0;
+}
+
+static inline int
+ia64_sn_set_os_feature(int feature)
+{
+       struct ia64_sal_retval rv;
+
+       SAL_CALL_NOLOCK(rv, SN_SAL_SET_OS_FEATURE_SET, feature, 0, 0, 0, 0, 0, 0);
+       return rv.status;
+}
+
+static inline int
+sn_inject_error(u64 paddr, u64 *data, u64 *ecc)
+{
+       struct ia64_sal_retval ret_stuff;
+
+       ia64_sal_oemcall_nolock(&ret_stuff, SN_SAL_INJECT_ERROR, paddr, (u64)data,
+                               (u64)ecc, 0, 0, 0, 0);
+       return ret_stuff.status;
+}
+
+static inline int
+ia64_sn_set_cpu_number(int cpu)
+{
+       struct ia64_sal_retval rv;
+
+       SAL_CALL_NOLOCK(rv, SN_SAL_SET_CPU_NUMBER, cpu, 0, 0, 0, 0, 0, 0);
+       return rv.status;
+}
 #endif /* _ASM_IA64_SN_SN_SAL_H */
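As a quick reference for the packed value returned by the new sn_sal_rev()
above (major revision in the high byte, minor in the low byte, e.g. 4.05 ->
0x0405), here is a small stand-alone sketch that decodes it the same way the
printk in sn_setup() does with version >> 8 and version & 0x00FF. The
hard-coded value is only an example, not something read from a real PROM.

/* Illustration of the u32 layout produced by sn_sal_rev():
 * bits 15..8 hold the major revision, bits 7..0 the minor revision.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t version = 0x0405;      /* example value, i.e. SAL 4.05 */

        /* same decoding as the printk in sn_setup() */
        printf("SGI SAL version %x.%02x\n",
               (unsigned)(version >> 8), (unsigned)(version & 0x00FF));
        return 0;
}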
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/asm/system.h
--- a/xen/include/asm-ia64/linux-xen/asm/system.h       Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/system.h       Wed Dec 13 17:30:18 2006 +0100
@@ -190,6 +190,7 @@ do {                                                        \
 #ifdef XEN
 #define local_irq_is_enabled() (!irqs_disabled())
 extern struct vcpu *ia64_switch_to(struct vcpu *next_task);
+#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 #else
 #ifdef __KERNEL__
 
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/linux/device.h
--- a/xen/include/asm-ia64/linux-xen/linux/device.h     Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/linux/device.h     Wed Dec 13 17:30:18 2006 +0100
@@ -78,7 +78,9 @@ int __must_check bus_for_each_drv(struct
 /* driverfs interface for exporting bus attributes */
 
 struct bus_attribute {
-       struct attribute        attr;
+#ifndef XEN
+       struct attribute        attr;
+#endif
        ssize_t (*show)(struct bus_type *, char * buf);
        ssize_t (*store)(struct bus_type *, const char * buf, size_t count);
 };
@@ -122,7 +124,9 @@ extern int driver_probe_done(void);
 /* driverfs interface for exporting driver attributes */
 
 struct driver_attribute {
-       struct attribute        attr;
+#ifndef XEN
+       struct attribute        attr;
+#endif
        ssize_t (*show)(struct device_driver *, char * buf);
        ssize_t (*store)(struct device_driver *, const char * buf, size_t count);
 };
@@ -152,7 +156,11 @@ struct class {
        struct list_head        children;
        struct list_head        devices;
        struct list_head        interfaces;
+#ifdef XEN
+       spinlock_t              sem;
+#else
        struct semaphore        sem;    /* locks both the children and interfaces lists */
+#endif
 
        struct kobject          *virtual_dir;
 
@@ -178,7 +186,9 @@ extern void class_unregister(struct clas
 
 
 struct class_attribute {
-       struct attribute        attr;
+#ifndef XEN
+       struct attribute        attr;
+#endif
        ssize_t (*show)(struct class *, char * buf);
        ssize_t (*store)(struct class *, const char * buf, size_t count);
 };
@@ -191,7 +201,9 @@ extern void class_remove_file(struct cla
 extern void class_remove_file(struct class *, const struct class_attribute *);
 
 struct class_device_attribute {
-       struct attribute        attr;
+#ifndef XEN
+       struct attribute        attr;
+#endif
        ssize_t (*show)(struct class_device *, char * buf);
        ssize_t (*store)(struct class_device *, const char * buf, size_t count);
 };
@@ -333,9 +345,13 @@ struct device {
        struct device_attribute uevent_attr;
        struct device_attribute *devt_attr;
 
+#ifdef XEN
+       spinlock_t              sem;
+#else
        struct semaphore        sem;    /* semaphore to synchronize calls to
                                         * its driver.
                                         */
+#endif
 
        struct bus_type * bus;          /* type of bus device is on */
        struct device_driver *driver;   /* which driver has allocated this
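
(Xen has no kernel semaphores, hence the sem members of struct class and struct
device become spinlocks under XEN. Code ported from Linux that serialised on
those semaphores would be adjusted roughly as in this sketch - an assumption
about how callers adapt, not something taken from the patch itself:)

    #ifdef XEN
        spin_lock(&dev->sem);
        /* ... touch per-device driver state ... */
        spin_unlock(&dev->sem);
    #else
        down(&dev->sem);
        /* ... touch per-device driver state ... */
        up(&dev->sem);
    #endif
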
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/linux/kobject.h
--- a/xen/include/asm-ia64/linux-xen/linux/kobject.h    Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/linux/kobject.h    Wed Dec 13 17:30:18 2006 +0100
@@ -169,7 +169,9 @@ extern struct kobject * kset_find_obj(st
 
 struct subsystem {
        struct kset             kset;
+#ifndef XEN
        struct rw_semaphore     rwsem;
+#endif
 };
 
 #define decl_subsys(_name,_type,_uevent_ops) \
@@ -254,7 +256,9 @@ static inline void subsys_put(struct sub
 }
 
 struct subsys_attribute {
+#ifndef XEN
        struct attribute attr;
+#endif
        ssize_t (*show)(struct subsystem *, char *);
        ssize_t (*store)(struct subsystem *, const char *, size_t); 
 };
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux-xen/linux/pci.h
--- a/xen/include/asm-ia64/linux-xen/linux/pci.h        Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/asm-ia64/linux-xen/linux/pci.h        Wed Dec 13 17:30:18 2006 +0100
@@ -22,6 +22,7 @@
 
 /* Include the ID list */
 #include <linux/pci_ids.h>
+#include <asm/processor.h>
 
 /*
  * The PCI interface treats multi-function devices as independent
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/xen/list.h
--- a/xen/include/xen/list.h    Wed Dec 13 15:32:03 2006 +0100
+++ b/xen/include/xen/list.h    Wed Dec 13 17:30:18 2006 +0100
@@ -2,6 +2,7 @@
 #define _LINUX_LIST_H
 
 #include <xen/lib.h>
+#include <asm/system.h>
 
 /*
  * Simple doubly linked list implementation.
@@ -66,6 +67,28 @@ static __inline__ void list_add_tail(str
 static __inline__ void list_add_tail(struct list_head *new, struct list_head *head)
 {
        __list_add(new, head->prev, head);
+}
+
+/**
+ * list_replace - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ * Note: if 'old' was empty, it will be overwritten.
+ */
+static inline void list_replace(struct list_head *old,
+                               struct list_head *new)
+{
+       new->next = old->next;
+       new->next->prev = new;
+       new->prev = old->prev;
+       new->prev->next = new;
+}
+
+static inline void list_replace_init(struct list_head *old,
+                                       struct list_head *new)
+{
+       list_replace(old, new);
+       INIT_LIST_HEAD(old);
 }
 
 /*
@@ -196,5 +219,316 @@ static __inline__ void list_splice(struc
                n = list_entry(pos->member.next, typeof(*pos), member); \
             &pos->member != (head);                                    \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))
+
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1  ((void *) 0x00100100)
+#define LIST_POISON2  ((void *) 0x00200200)
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+struct hlist_head {
+       struct hlist_node *first;
+};
+
+struct hlist_node {
+       struct hlist_node *next, **pprev;
+};
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+       h->next = NULL;
+       h->pprev = NULL;
+}
+
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+       return !h->pprev;
+}
+
+static inline int hlist_empty(const struct hlist_head *h)
+{
+       return !h->first;
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+       struct hlist_node *next = n->next;
+       struct hlist_node **pprev = n->pprev;
+       *pprev = next;
+       if (next)
+               next->pprev = pprev;
+}
+
+static inline void hlist_del(struct hlist_node *n)
+{
+       __hlist_del(n);
+       n->next = LIST_POISON1;
+       n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_del_rcu - deletes entry from hash list without re-initialization
+ * @n: the element to delete from the hash list.
+ *
+ * Note: list_unhashed() on entry does not return true after this,
+ * the entry is in an undefined state. It is useful for RCU based
+ * lockfree traversal.
+ *
+ * In particular, it means that we can not poison the forward
+ * pointers that may still be used for walking the hash list.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry().
+ */
+static inline void hlist_del_rcu(struct hlist_node *n)
+{
+       __hlist_del(n);
+       n->pprev = LIST_POISON2;
+}
+
+static inline void hlist_del_init(struct hlist_node *n)
+{
+       if (!hlist_unhashed(n)) {
+               __hlist_del(n);
+               INIT_HLIST_NODE(n);
+       }
+}
+
+/*
+ * hlist_replace_rcu - replace old entry by new one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * The old entry will be replaced with the new entry atomically.
+ */
+static inline void hlist_replace_rcu(struct hlist_node *old,
+                                       struct hlist_node *new)
+{
+       struct hlist_node *next = old->next;
+
+       new->next = next;
+       new->pprev = old->pprev;
+       smp_wmb();
+       if (next)
+               new->next->pprev = &new->next;
+       *new->pprev = new;
+       old->pprev = LIST_POISON2;
+}
+
+static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
+{
+       struct hlist_node *first = h->first;
+       n->next = first;
+       if (first)
+               first->pprev = &n->next;
+       h->first = n;
+       n->pprev = &h->first;
+}
+
+
+/**
+ * hlist_add_head_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_head_rcu(struct hlist_node *n,
+                                       struct hlist_head *h)
+{
+       struct hlist_node *first = h->first;
+       n->next = first;
+       n->pprev = &h->first;
+       smp_wmb();
+       if (first)
+               first->pprev = &n->next;
+       h->first = n;
+}
+
+/* next must be != NULL */
+static inline void hlist_add_before(struct hlist_node *n,
+                                       struct hlist_node *next)
+{
+       n->pprev = next->pprev;
+       n->next = next;
+       next->pprev = &n->next;
+       *(n->pprev) = n;
+}
+
+static inline void hlist_add_after(struct hlist_node *n,
+                                       struct hlist_node *next)
+{
+       next->next = n->next;
+       n->next = next;
+       next->pprev = &n->next;
+
+       if(next->next)
+               next->next->pprev  = &next->next;
+}
+
+/**
+ * hlist_add_before_rcu
+ * @n: the new element to add to the hash list.
+ * @next: the existing element to add the new element before.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * before the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_before_rcu(struct hlist_node *n,
+                                       struct hlist_node *next)
+{
+       n->pprev = next->pprev;
+       n->next = next;
+       smp_wmb();
+       next->pprev = &n->next;
+       *(n->pprev) = n;
+}
+
+/**
+ * hlist_add_after_rcu
+ * @prev: the existing element to add the new element after.
+ * @n: the new element to add to the hash list.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist
+ * after the specified node while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.
+ */
+static inline void hlist_add_after_rcu(struct hlist_node *prev,
+                                      struct hlist_node *n)
+{
+       n->next = prev->next;
+       n->pprev = &prev->next;
+       smp_wmb();
+       prev->next = n;
+       if (n->next)
+               n->next->pprev = &n->next;
+}
+
+#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
+
+#define hlist_for_each(pos, head) \
+       for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
+            pos = pos->next)
+
+#define hlist_for_each_safe(pos, n, head) \
+       for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
+            pos = n)
+
+/**
+ * hlist_for_each_entry        - iterate over list of given type
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @head:      the head for your list.
+ * @member:    the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry(tpos, pos, head, member)                   \
+       for (pos = (head)->first;                                        \
+            pos && ({ prefetch(pos->next); 1;}) &&                      \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = pos->next)
+
+/**
+ * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @member:    the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_continue(tpos, pos, member)                \
+       for (pos = (pos)->next;                                          \
+            pos && ({ prefetch(pos->next); 1;}) &&                      \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = pos->next)
+
+/**
+ * hlist_for_each_entry_from - iterate over a hlist continuing from current point
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @member:    the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_from(tpos, pos, member)                    \
+       for (; pos && ({ prefetch(pos->next); 1;}) &&                    \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = pos->next)
+
+/**
+ * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @n:         another &struct hlist_node to use as temporary storage
+ * @head:      the head for your list.
+ * @member:    the name of the hlist_node within the struct.
+ */
+#define hlist_for_each_entry_safe(tpos, pos, n, head, member)           \
+       for (pos = (head)->first;                                        \
+            pos && ({ n = pos->next; 1; }) &&                           \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = n)
+
+/**
+ * hlist_for_each_entry_rcu - iterate over rcu list of given type
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @head:      the head for your list.
+ * @member:    the name of the hlist_node within the struct.
+ *
+ * This list-traversal primitive may safely run concurrently with
+ * the _rcu list-mutation primitives such as hlist_add_head_rcu()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define hlist_for_each_entry_rcu(tpos, pos, head, member)               \
+       for (pos = (head)->first;                                        \
+            rcu_dereference(pos) && ({ prefetch(pos->next); 1;}) &&     \
+               ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
+            pos = pos->next)
+
 #endif /* _LINUX_LIST_H */
 
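
(To illustrate the list.h primitives being pulled in from linux-2.6.19, here is
a small usage sketch of the new hlist API. The struct and variable names are
made up for the example and nothing below is part of the changeset.)

    /* assumes <xen/list.h> is included */
    struct item {
        int val;
        struct hlist_node node;
    };

    static HLIST_HEAD(bucket);

    static void example(struct item *a, struct item *b)
    {
        struct item *it;
        struct hlist_node *pos;

        hlist_add_head(&a->node, &bucket);      /* push a onto the front */
        hlist_replace_rcu(&a->node, &b->node);  /* swap b in for a, in place */

        hlist_for_each_entry(it, pos, &bucket, node)    /* walk every entry */
            it->val++;
    }
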
diff -r f936608bf9f8 -r 10c1f9a049e3 xen/include/asm-ia64/linux/asm/pci.h
--- a/xen/include/asm-ia64/linux/asm/pci.h      Wed Dec 13 15:32:03 2006 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,161 +0,0 @@
-#ifndef _ASM_IA64_PCI_H
-#define _ASM_IA64_PCI_H
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-#include <asm/scatterlist.h>
-
-/*
- * Can be used to override the logic in pci_scan_bus for skipping already-configured bus
- * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the
- * loader.
- */
-#define pcibios_assign_all_busses()     0
-#define pcibios_scan_all_fns(a, b)     0
-
-#define PCIBIOS_MIN_IO         0x1000
-#define PCIBIOS_MIN_MEM                0x10000000
-
-void pcibios_config_init(void);
-
-struct pci_dev;
-
-/*
- * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct 
correspondence
- * between device bus addresses and CPU physical addresses.  Platforms with a 
hardware I/O
- * MMU _must_ turn this off to suppress the bounce buffer handling code in the 
block and
- * network device layers.  Platforms with separate bus address spaces _must_ 
turn this off
- * and provide a device DMA mapping implementation that takes care of the 
necessary
- * address translation.
- *
- * For now, the ia64 platforms which may have separate/multiple bus address 
spaces all
- * have I/O MMUs which support the merging of physically discontiguous 
buffers, so we can
- * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS.
- */
-extern unsigned long ia64_max_iommu_merge_mask;
-#define PCI_DMA_BUS_IS_PHYS    (ia64_max_iommu_merge_mask == ~0UL)
-
-static inline void
-pcibios_set_master (struct pci_dev *dev)
-{
-       /* No special bus mastering setup handling */
-}
-
-static inline void
-pcibios_penalize_isa_irq (int irq, int active)
-{
-       /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define HAVE_ARCH_PCI_MWI 1
-extern int pcibios_prep_mwi (struct pci_dev *);
-
-#include <asm-generic/pci-dma-compat.h>
-
-/* pci_unmap_{single,page} is not a nop, thus... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)      \
-       dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)                \
-       __u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME)                 \
-       ((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)                \
-       (((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME)                   \
-       ((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)          \
-       (((PTR)->LEN_NAME) = (VAL))
-
-/* The ia64 platform always supports 64-bit addressing. */
-#define pci_dac_dma_supported(pci_dev, mask)           (1)
-#define pci_dac_page_to_dma(dev,pg,off,dir)            ((dma_addr_t) page_to_bus(pg) + (off))
-#define pci_dac_dma_to_page(dev,dma_addr)              (virt_to_page(bus_to_virt(dma_addr)))
-#define pci_dac_dma_to_offset(dev,dma_addr)            offset_in_page(dma_addr)
-#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir)  do { } while (0)
-#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir)       do { mb(); } while (0)
-
-#define sg_dma_len(sg)         ((sg)->dma_length)
-#define sg_dma_address(sg)     ((sg)->dma_address)
-
-#ifdef CONFIG_PCI
-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
-                                       enum pci_dma_burst_strategy *strat,
-                                       unsigned long *strategy_parameter)
-{
-       unsigned long cacheline_size;
-       u8 byte;
-
-       pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
-       if (byte == 0)
-               cacheline_size = 1024;
-       else
-               cacheline_size = (int) byte * 4;
-
-       *strat = PCI_DMA_BURST_MULTIPLE;
-       *strategy_parameter = cacheline_size;
-}
-#endif
-
-#define HAVE_PCI_MMAP
-extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
-                               enum pci_mmap_state mmap_state, int write_combine);
-#define HAVE_PCI_LEGACY
-extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
-                                     struct vm_area_struct *vma);
-extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off,
-                                 size_t count);
-extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off,
-                                  size_t count);
-extern int pci_mmap_legacy_mem(struct kobject *kobj,
-                              struct bin_attribute *attr,
-                              struct vm_area_struct *vma);
-
-#define pci_get_legacy_mem platform_pci_get_legacy_mem
-#define pci_legacy_read platform_pci_legacy_read
-#define pci_legacy_write platform_pci_legacy_write
-
-struct pci_window {
-       struct resource resource;
-       u64 offset;
-};
-
-struct pci_controller {
-       void *acpi_handle;
-       void *iommu;
-       int segment;
-       int node;               /* nearest node with memory or -1 for global allocation */
-
-       unsigned int windows;
-       struct pci_window *window;
-
-       void *platform_data;
-};
-
-#define PCI_CONTROLLER(busdev) ((struct pci_controller *) busdev->sysdata)
-#define pci_domain_nr(busdev)    (PCI_CONTROLLER(busdev)->segment)
-
-extern struct pci_ops pci_root_ops;
-
-static inline int pci_proc_domain(struct pci_bus *bus)
-{
-       return (pci_domain_nr(bus) != 0);
-}
-
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
-extern void pcibios_resource_to_bus(struct pci_dev *dev,
-               struct pci_bus_region *region, struct resource *res);
-
-extern void pcibios_bus_to_resource(struct pci_dev *dev,
-               struct resource *res, struct pci_bus_region *region);
-
-#define pcibios_scan_all_fns(a, b)     0
-
-#endif /* _ASM_IA64_PCI_H */
_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel

 

