[Xen-changelog] [xen-unstable] [IA64] new files from Linux 2.6.19 required for SN2 and machvec support
# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Date 1166033320 25200
# Node ID de3d55c10f160d4271d88e3211bdd9b8e754f7ae
# Parent c0d41ac21486b69a3b81ac5692c8e6ae0592a789
[IA64] new files from Linux 2.6.19 required for SN2 and machvec support

Signed-off-by: Jes Sorensen <jes@xxxxxxx>
---
 xen/include/asm-ia64/linux-null/linux/ioport.h | 1
 xen/arch/ia64/linux-xen/sn/Makefile | 1
 xen/arch/ia64/linux-xen/sn/kernel/Makefile | 5
 xen/arch/ia64/linux-xen/sn/kernel/README.origin | 12
 xen/arch/ia64/linux-xen/sn/kernel/io_init.c | 740 +++
 xen/arch/ia64/linux-xen/sn/kernel/iomv.c | 76
 xen/arch/ia64/linux-xen/sn/kernel/irq.c | 444 +
 xen/arch/ia64/linux-xen/sn/kernel/setup.c | 755 +++
 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c | 489 ++
 xen/arch/ia64/linux/Makefile | 1
 xen/arch/ia64/linux/README.origin | 3
 xen/arch/ia64/linux/dig/Makefile | 1
 xen/arch/ia64/linux/dig/README.origin | 7
 xen/arch/ia64/linux/dig/machvec.c | 3
 xen/arch/ia64/linux/hp/Makefile | 1
 xen/arch/ia64/linux/hp/zx1/Makefile | 1
 xen/arch/ia64/linux/hp/zx1/README.origin | 7
 xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c | 3
 xen/arch/ia64/linux/io.c | 164
 xen/arch/ia64/linux/sn/Makefile | 2
 xen/arch/ia64/linux/sn/kernel/Makefile | 3
 xen/arch/ia64/linux/sn/kernel/README.origin | 9
 xen/arch/ia64/linux/sn/kernel/machvec.c | 11
 xen/arch/ia64/linux/sn/kernel/pio_phys.S | 71
 xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S | 92
 xen/arch/ia64/linux/sn/pci/Makefile | 1
 xen/arch/ia64/linux/sn/pci/pcibr/Makefile | 1
 xen/arch/ia64/linux/sn/pci/pcibr/README.origin | 7
 xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c | 285 +
 xen/include/asm-ia64/linux-xen/asm/README.origin | 6
 xen/include/asm-ia64/linux-xen/asm/machvec_dig.h | 16
 xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h | 37
 xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h | 133
 xen/include/asm-ia64/linux-xen/asm/pci.h | 174
 xen/include/asm-ia64/linux-xen/asm/sn/README.origin | 16
 xen/include/asm-ia64/linux-xen/asm/sn/addrs.h | 299 +
 xen/include/asm-ia64/linux-xen/asm/sn/arch.h | 85
 xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h | 91
 xen/include/asm-ia64/linux-xen/asm/sn/intr.h | 67
 xen/include/asm-ia64/linux-xen/asm/sn/io.h | 274 +
 xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h | 83
 xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h | 149
 xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h | 28
 xen/include/asm-ia64/linux-xen/asm/sn/types.h | 26
 xen/include/asm-ia64/linux-xen/linux/README.origin | 5
 xen/include/asm-ia64/linux-xen/linux/device.h | 473 ++
 xen/include/asm-ia64/linux-xen/linux/kobject.h | 282 +
 xen/include/asm-ia64/linux-xen/linux/pci.h | 817 +++
 xen/include/asm-ia64/linux/README.origin | 12
 xen/include/asm-ia64/linux/asm/README.origin | 3
 xen/include/asm-ia64/linux/asm/machvec_init.h | 32
 xen/include/asm-ia64/linux/asm/sn/README.origin | 23
 xen/include/asm-ia64/linux/asm/sn/geo.h | 132
 xen/include/asm-ia64/linux/asm/sn/klconfig.h | 246 +
 xen/include/asm-ia64/linux/asm/sn/l1.h | 51
 xen/include/asm-ia64/linux/asm/sn/leds.h | 33
 xen/include/asm-ia64/linux/asm/sn/module.h | 127
 xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h | 68
 xen/include/asm-ia64/linux/asm/sn/pcidev.h | 83
 xen/include/asm-ia64/linux/asm/sn/pda.h | 69
 xen/include/asm-ia64/linux/asm/sn/pic.h | 261 +
 xen/include/asm-ia64/linux/asm/sn/shub_mmr.h | 502 ++
 xen/include/asm-ia64/linux/asm/sn/shubio.h | 3358 +++++++++++++++
 xen/include/asm-ia64/linux/asm/sn/simulator.h | 20
 xen/include/asm-ia64/linux/asm/sn/sn_cpuid.h | 132
 xen/include/asm-ia64/linux/asm/sn/sn_feature_sets.h | 51
 xen/include/asm-ia64/linux/asm/sn/tiocp.h | 257 +
 xen/include/asm-ia64/linux/asm/sn/xbow.h | 301 +
 xen/include/asm-ia64/linux/asm/sn/xwidgetdev.h | 70
 xen/include/asm-ia64/linux/completion.h | 57
 xen/include/asm-ia64/linux/ioport.h | 136
 xen/include/asm-ia64/linux/klist.h | 61
 xen/include/asm-ia64/linux/kref.h | 32
 xen/include/asm-ia64/linux/mod_devicetable.h | 323 +
 xen/include/asm-ia64/linux/pci_ids.h | 2356 ++++++++++
 xen/include/asm-ia64/linux/pci_regs.h | 488 ++
 xen/include/asm-ia64/linux/pm.h | 279 +
 xen/include/asm-ia64/linux/sysfs.h | 206
 78 files changed, 16025 insertions(+), 1 deletion(-)

diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/Makefile       Wed Dec 13 11:08:40 2006 -0700
@@ -0,0 +1,1 @@
+subdir-y += kernel
diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/kernel/Makefile
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/Makefile        Wed Dec 13 11:08:40 2006 -0700
@@ -0,0 +1,5 @@
+obj-y += sn2_smp.o
+obj-y += setup.o
+obj-y += iomv.o
+obj-y += irq.o
+obj-y += io_init.o
diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/kernel/README.origin
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/README.origin   Wed Dec 13 11:08:40 2006 -0700
@@ -0,0 +1,12 @@
+# Source files in this directory are near-identical copies of linux-2.6.19
+# files:
+
+# NOTE: ALL changes to these files should be clearly marked
+# (e.g. with #ifdef XEN or XEN in a comment) so that they can be
+# easily updated to future versions of the corresponding Linux files.
+
+io_init.c  -> linux/arch/ia64/sn/kernel/io_init.c
+iomv.c     -> linux/arch/ia64/sn/kernel/iomv.c
+irq.c      -> linux/arch/ia64/sn/kernel/irq.c
+setup.c    -> linux/arch/ia64/sn/kernel/setup.c
+sn2_smp.c  -> linux/arch/ia64/sn/kernel/sn2/sn2_smp.c
diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/kernel/io_init.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/xen/arch/ia64/linux-xen/sn/kernel/io_init.c       Wed Dec 13 11:08:40 2006 -0700
@@ -0,0 +1,740 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/bootmem.h>
+#include <linux/nodemask.h>
+#include <asm/sn/types.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/sn_feature_sets.h>
+#include <asm/sn/geo.h>
+#include <asm/sn/io.h>
+#include <asm/sn/l1.h>
+#include <asm/sn/module.h>
+#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
+#include <asm/sn/simulator.h>
+#include <asm/sn/sn_sal.h>
+#include <asm/sn/tioca_provider.h>
+#include <asm/sn/tioce_provider.h>
+#include "xtalk/hubdev.h"
+#include "xtalk/xwidgetdev.h"
+
+
+extern void sn_init_cpei_timer(void);
+extern void register_sn_procfs(void);
+
+static struct list_head sn_sysdata_list;
+
+/* sysdata list struct */
+struct sysdata_el {
+        struct list_head entry;
+        void *sysdata;
+};
+
+struct slab_info {
+        struct hubdev_info hubdev;
+};
+
+struct brick {
+        moduleid_t id;          /* Module ID of this module */
+        struct slab_info slab_info[MAX_SLABS + 1];
+};
+
+int sn_ioif_inited;             /* SN I/O infrastructure initialized?
*/ + +struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */ + +static int max_segment_number; /* Default highest segment number */ +static int max_pcibus_number = 255; /* Default highest pci bus number */ + +/* + * Hooks and struct for unsupported pci providers + */ + +static dma_addr_t +sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type) +{ + return 0; +} + +static void +sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction) +{ + return; +} + +static void * +sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller) +{ + return NULL; +} + +static struct sn_pcibus_provider sn_pci_default_provider = { + .dma_map = sn_default_pci_map, + .dma_map_consistent = sn_default_pci_map, + .dma_unmap = sn_default_pci_unmap, + .bus_fixup = sn_default_pci_bus_fixup, +}; + +/* + * Retrieve the DMA Flush List given nasid, widget, and device. + * This list is needed to implement the WAR - Flush DMA data on PIO Reads. + */ +static inline u64 +sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num, + u64 address) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, + (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST, + (u64) nasid, (u64) widget_num, + (u64) device_num, (u64) address, 0, 0, 0); + return ret_stuff.status; +} + +/* + * Retrieve the hub device info structure for the given nasid. + */ +static inline u64 sal_get_hubdev_info(u64 handle, u64 address) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, + (u64) SN_SAL_IOIF_GET_HUBDEV_INFO, + (u64) handle, (u64) address, 0, 0, 0, 0, 0); + return ret_stuff.v0; +} + +/* + * Retrieve the pci bus information given the bus number. + */ +static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, + (u64) SN_SAL_IOIF_GET_PCIBUS_INFO, + (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0); + return ret_stuff.v0; +} + +/* + * Retrieve the pci device information given the bus and device|function number. + */ +static inline u64 +sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, + u64 sn_irq_info) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, + (u64) SN_SAL_IOIF_GET_PCIDEV_INFO, + (u64) segment, (u64) bus_number, (u64) devfn, + (u64) pci_dev, + sn_irq_info, 0, 0); + return ret_stuff.v0; +} + +/* + * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified + * device. + */ +inline struct pcidev_info * +sn_pcidev_info_get(struct pci_dev *dev) +{ + struct pcidev_info *pcidev; + + list_for_each_entry(pcidev, + &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) { + if (pcidev->pdi_linux_pcidev == dev) { + return pcidev; + } + } + return NULL; +} + +/* Older PROM flush WAR + * + * 01/16/06 -- This war will be in place until a new official PROM is released. 
+ * Additionally note that the struct sn_flush_device_war also has to be + * removed from arch/ia64/sn/include/xtalk/hubdev.h + */ +static u8 war_implemented = 0; + +static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device, + struct sn_flush_device_common *common) +{ + struct sn_flush_device_war *war_list; + struct sn_flush_device_war *dev_entry; + struct ia64_sal_retval isrv = {0,0,0,0}; + + if (!war_implemented) { + printk(KERN_WARNING "PROM version < 4.50 -- implementing old " + "PROM flush WAR\n"); + war_implemented = 1; + } + + war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL); + if (!war_list) + BUG(); + + SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST, + nasid, widget, __pa(war_list), 0, 0, 0 ,0); + if (isrv.status) + panic("sn_device_fixup_war failed: %s\n", + ia64_sal_strerror(isrv.status)); + + dev_entry = war_list + device; + memcpy(common,dev_entry, sizeof(*common)); + kfree(war_list); + + return isrv.status; +} + +/* + * sn_fixup_ionodes() - This routine initializes the HUB data strcuture for + * each node in the system. + */ +static void __init sn_fixup_ionodes(void) +{ + struct sn_flush_device_kernel *sn_flush_device_kernel; + struct sn_flush_device_kernel *dev_entry; + struct hubdev_info *hubdev; + u64 status; + u64 nasid; + int i, widget, device, size; + + /* + * Get SGI Specific HUB chipset information. + * Inform Prom that this kernel can support domain bus numbering. + */ + for (i = 0; i < num_cnodes; i++) { + hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo); + nasid = cnodeid_to_nasid(i); + hubdev->max_segment_number = 0xffffffff; + hubdev->max_pcibus_number = 0xff; + status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev)); + if (status) + continue; + + /* Save the largest Domain and pcibus numbers found. */ + if (hubdev->max_segment_number) { + /* + * Dealing with a Prom that supports segments. + */ + max_segment_number = hubdev->max_segment_number; + max_pcibus_number = hubdev->max_pcibus_number; + } + + /* Attach the error interrupt handlers */ + if (nasid & 1) + ice_error_init(hubdev); + else + hub_error_init(hubdev); + + for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) + hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev; + + if (!hubdev->hdi_flush_nasid_list.widget_p) + continue; + + size = (HUB_WIDGET_ID_MAX + 1) * + sizeof(struct sn_flush_device_kernel *); + hubdev->hdi_flush_nasid_list.widget_p = + kzalloc(size, GFP_KERNEL); + if (!hubdev->hdi_flush_nasid_list.widget_p) + BUG(); + + for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) { + size = DEV_PER_WIDGET * + sizeof(struct sn_flush_device_kernel); + sn_flush_device_kernel = kzalloc(size, GFP_KERNEL); + if (!sn_flush_device_kernel) + BUG(); + + dev_entry = sn_flush_device_kernel; + for (device = 0; device < DEV_PER_WIDGET; + device++,dev_entry++) { + size = sizeof(struct sn_flush_device_common); + dev_entry->common = kzalloc(size, GFP_KERNEL); + if (!dev_entry->common) + BUG(); + + if (sn_prom_feature_available( + PRF_DEVICE_FLUSH_LIST)) + status = sal_get_device_dmaflush_list( + nasid, widget, device, + (u64)(dev_entry->common)); + else + status = sn_device_fixup_war(nasid, + widget, device, + dev_entry->common); + if (status != SALRET_OK) + panic("SAL call failed: %s\n", + ia64_sal_strerror(status)); + + spin_lock_init(&dev_entry->sfdl_flush_lock); + } + + if (sn_flush_device_kernel) + hubdev->hdi_flush_nasid_list.widget_p[widget] = + sn_flush_device_kernel; + } + } +} + +/* + * sn_pci_window_fixup() - Create a pci_window for each device resource. 
+ * Until ACPI support is added, we need this code + * to setup pci_windows for use by + * pcibios_bus_to_resource(), + * pcibios_resource_to_bus(), etc. + */ +static void +sn_pci_window_fixup(struct pci_dev *dev, unsigned int count, + s64 * pci_addrs) +{ + struct pci_controller *controller = PCI_CONTROLLER(dev->bus); + unsigned int i; + unsigned int idx; + unsigned int new_count; + struct pci_window *new_window; + + if (count == 0) + return; + idx = controller->windows; + new_count = controller->windows + count; + new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL); + if (new_window == NULL) + BUG(); + if (controller->window) { + memcpy(new_window, controller->window, + sizeof(struct pci_window) * controller->windows); + kfree(controller->window); + } + + /* Setup a pci_window for each device resource. */ + for (i = 0; i <= PCI_ROM_RESOURCE; i++) { + if (pci_addrs[i] == -1) + continue; + + new_window[idx].offset = dev->resource[i].start - pci_addrs[i]; + new_window[idx].resource = dev->resource[i]; + idx++; + } + + controller->windows = new_count; + controller->window = new_window; +} + +void sn_pci_unfixup_slot(struct pci_dev *dev) +{ + struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev; + + sn_irq_unfixup(dev); + pci_dev_put(host_pci_dev); + pci_dev_put(dev); +} + +/* + * sn_pci_fixup_slot() - This routine sets up a slot's resources + * consistent with the Linux PCI abstraction layer. Resources acquired + * from our PCI provider include PIO maps to BAR space and interrupt + * objects. + */ +void sn_pci_fixup_slot(struct pci_dev *dev) +{ + unsigned int count = 0; + int idx; + int segment = pci_domain_nr(dev->bus); + int status = 0; + struct pcibus_bussoft *bs; + struct pci_bus *host_pci_bus; + struct pci_dev *host_pci_dev; + struct pcidev_info *pcidev_info; + s64 pci_addrs[PCI_ROM_RESOURCE + 1]; + struct sn_irq_info *sn_irq_info; + unsigned long size; + unsigned int bus_no, devfn; + + pci_dev_get(dev); /* for the sysdata pointer */ + pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); + if (!pcidev_info) + BUG(); /* Cannot afford to run out of memory */ + + sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); + if (!sn_irq_info) + BUG(); /* Cannot afford to run out of memory */ + + /* Call to retrieve pci device information needed by kernel. */ + status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number, + dev->devfn, + (u64) __pa(pcidev_info), + (u64) __pa(sn_irq_info)); + if (status) + BUG(); /* Cannot get platform pci device information */ + + /* Add pcidev_info to list in sn_pci_controller struct */ + list_add_tail(&pcidev_info->pdi_list, + &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info)); + + /* Copy over PIO Mapped Addresses */ + for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) { + unsigned long start, end, addr; + + if (!pcidev_info->pdi_pio_mapped_addr[idx]) { + pci_addrs[idx] = -1; + continue; + } + + start = dev->resource[idx].start; + end = dev->resource[idx].end; + size = end - start; + if (size == 0) { + pci_addrs[idx] = -1; + continue; + } + pci_addrs[idx] = start; + count++; + addr = pcidev_info->pdi_pio_mapped_addr[idx]; + addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET; + dev->resource[idx].start = addr; + dev->resource[idx].end = addr + size; + if (dev->resource[idx].flags & IORESOURCE_IO) + dev->resource[idx].parent = &ioport_resource; + else + dev->resource[idx].parent = &iomem_resource; + } + /* Create a pci_window in the pci_controller struct for + * each device resource. 
+ */ + if (count > 0) + sn_pci_window_fixup(dev, count, pci_addrs); + + /* + * Using the PROMs values for the PCI host bus, get the Linux + * PCI host_pci_dev struct and set up host bus linkages + */ + + bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff; + devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff; + host_pci_bus = pci_find_bus(segment, bus_no); + host_pci_dev = pci_get_slot(host_pci_bus, devfn); + + pcidev_info->host_pci_dev = host_pci_dev; + pcidev_info->pdi_linux_pcidev = dev; + pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev); + bs = SN_PCIBUS_BUSSOFT(dev->bus); + pcidev_info->pdi_pcibus_info = bs; + + if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) { + SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type]; + } else { + SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider; + } + + /* Only set up IRQ stuff if this device has a host bus context */ + if (bs && sn_irq_info->irq_irq) { + pcidev_info->pdi_sn_irq_info = sn_irq_info; + dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq; + sn_irq_fixup(dev, sn_irq_info); + } else { + pcidev_info->pdi_sn_irq_info = NULL; + kfree(sn_irq_info); + } +} + +/* + * sn_pci_controller_fixup() - This routine sets up a bus's resources + * consistent with the Linux PCI abstraction layer. + */ +void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus) +{ + int status; + int nasid, cnode; + struct pci_controller *controller; + struct sn_pci_controller *sn_controller; + struct pcibus_bussoft *prom_bussoft_ptr; + struct hubdev_info *hubdev_info; + void *provider_soft; + struct sn_pcibus_provider *provider; + + status = sal_get_pcibus_info((u64) segment, (u64) busnum, + (u64) ia64_tpa(&prom_bussoft_ptr)); + if (status > 0) + return; /*bus # does not exist */ + prom_bussoft_ptr = __va(prom_bussoft_ptr); + + /* Allocate a sn_pci_controller, which has a pci_controller struct + * as the first member. + */ + sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL); + if (!sn_controller) + BUG(); + INIT_LIST_HEAD(&sn_controller->pcidev_info); + controller = &sn_controller->pci_controller; + controller->segment = segment; + + if (bus == NULL) { + bus = pci_scan_bus(busnum, &pci_root_ops, controller); + if (bus == NULL) + goto error_return; /* error, or bus already scanned */ + bus->sysdata = NULL; + } + + if (bus->sysdata) + goto error_return; /* sysdata already alloc'd */ + + /* + * Per-provider fixup. Copies the contents from prom to local + * area and links SN_PCIBUS_BUSSOFT(). + */ + + if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) + goto error_return; /* unsupported asic type */ + + if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) + goto error_return; /* no further fixup necessary */ + + provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type]; + if (provider == NULL) + goto error_return; /* no provider registerd for this asic */ + + bus->sysdata = controller; + if (provider->bus_fixup) + provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller); + else + provider_soft = NULL; + + if (provider_soft == NULL) { + /* fixup failed or not applicable */ + bus->sysdata = NULL; + goto error_return; + } + + /* + * Setup pci_windows for legacy IO and MEM space. + * (Temporary until ACPI support is in place.) 
+ */ + controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL); + if (controller->window == NULL) + BUG(); + controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io; + controller->window[0].resource.name = "legacy_io"; + controller->window[0].resource.flags = IORESOURCE_IO; + controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io; + controller->window[0].resource.end = + controller->window[0].resource.start + 0xffff; + controller->window[0].resource.parent = &ioport_resource; + controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem; + controller->window[1].resource.name = "legacy_mem"; + controller->window[1].resource.flags = IORESOURCE_MEM; + controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem; + controller->window[1].resource.end = + controller->window[1].resource.start + (1024 * 1024) - 1; + controller->window[1].resource.parent = &iomem_resource; + controller->windows = 2; + + /* + * Generic bus fixup goes here. Don't reference prom_bussoft_ptr + * after this point. + */ + + PCI_CONTROLLER(bus)->platform_data = provider_soft; + nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base); + cnode = nasid_to_cnodeid(nasid); + hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); + SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info = + &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]); + + /* + * If the node information we obtained during the fixup phase is invalid + * then set controller->node to -1 (undetermined) + */ + if (controller->node >= num_online_nodes()) { + struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus); + + printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u" + "L_IO=%lx L_MEM=%lx BASE=%lx\n", + b->bs_asic_type, b->bs_xid, b->bs_persist_busnum, + b->bs_legacy_io, b->bs_legacy_mem, b->bs_base); + printk(KERN_WARNING "on node %d but only %d nodes online." + "Association set to undetermined.\n", + controller->node, num_online_nodes()); + controller->node = -1; + } + return; + +error_return: + + kfree(sn_controller); + return; +} + +void sn_bus_store_sysdata(struct pci_dev *dev) +{ + struct sysdata_el *element; + + element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); + if (!element) { + dev_dbg(dev, "%s: out of memory!\n", __FUNCTION__); + return; + } + element->sysdata = SN_PCIDEV_INFO(dev); + list_add(&element->entry, &sn_sysdata_list); +} + +void sn_bus_free_sysdata(void) +{ + struct sysdata_el *element; + struct list_head *list, *safe; + + list_for_each_safe(list, safe, &sn_sysdata_list) { + element = list_entry(list, struct sysdata_el, entry); + list_del(&element->entry); + list_del(&(((struct pcidev_info *) + (element->sysdata))->pdi_list)); + kfree(element->sysdata); + kfree(element); + } + return; +} + +/* + * Ugly hack to get PCI setup until we have a proper ACPI namespace. + */ + +#define PCI_BUSES_TO_SCAN 256 + +static int __init sn_pci_init(void) +{ + int i, j; + struct pci_dev *pci_dev = NULL; + + if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM()) + return 0; + + /* + * prime sn_pci_provider[]. Individial provider init routines will + * override their respective default entries. 
+ */ + + for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++) + sn_pci_provider[i] = &sn_pci_default_provider; + + pcibr_init_provider(); + tioca_init_provider(); + tioce_init_provider(); + + /* + * This is needed to avoid bounce limit checks in the blk layer + */ + ia64_max_iommu_merge_mask = ~PAGE_MASK; + sn_fixup_ionodes(); + sn_irq_lh_init(); + INIT_LIST_HEAD(&sn_sysdata_list); + sn_init_cpei_timer(); + +#ifdef CONFIG_PROC_FS + register_sn_procfs(); +#endif + + /* busses are not known yet ... */ + for (i = 0; i <= max_segment_number; i++) + for (j = 0; j <= max_pcibus_number; j++) + sn_pci_controller_fixup(i, j, NULL); + + /* + * Generic Linux PCI Layer has created the pci_bus and pci_dev + * structures - time for us to add our SN PLatform specific + * information. + */ + + while ((pci_dev = + pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) + sn_pci_fixup_slot(pci_dev); + + sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */ + + return 0; +} + +/* + * hubdev_init_node() - Creates the HUB data structure and link them to it's + * own NODE specific data area. + */ +void hubdev_init_node(nodepda_t * npda, cnodeid_t node) +{ + struct hubdev_info *hubdev_info; + int size; + pg_data_t *pg; + + size = sizeof(struct hubdev_info); + + if (node >= num_online_nodes()) /* Headless/memless IO nodes */ + pg = NODE_DATA(0); + else + pg = NODE_DATA(node); + + hubdev_info = (struct hubdev_info *)alloc_bootmem_node(pg, size); + + npda->pdinfo = (void *)hubdev_info; +} + +geoid_t +cnodeid_get_geoid(cnodeid_t cnode) +{ + struct hubdev_info *hubdev; + + hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo); + return hubdev->hdi_geoid; +} + +void sn_generate_path(struct pci_bus *pci_bus, char *address) +{ + nasid_t nasid; + cnodeid_t cnode; + geoid_t geoid; + moduleid_t moduleid; + u16 bricktype; + + nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); + cnode = nasid_to_cnodeid(nasid); + geoid = cnodeid_get_geoid(cnode); + moduleid = geo_module(geoid); + + sprintf(address, "module_%c%c%c%c%.2d", + '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)), + '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)), + '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)), + MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid)); + + /* Tollhouse requires slot id to be displayed */ + bricktype = MODULE_GET_BTYPE(moduleid); + if ((bricktype == L1_BRICKTYPE_191010) || + (bricktype == L1_BRICKTYPE_1932)) + sprintf(address, "%s^%d", address, geo_slot(geoid)); +} + +subsys_initcall(sn_pci_init); +EXPORT_SYMBOL(sn_pci_fixup_slot); +EXPORT_SYMBOL(sn_pci_unfixup_slot); +EXPORT_SYMBOL(sn_pci_controller_fixup); +EXPORT_SYMBOL(sn_bus_store_sysdata); +EXPORT_SYMBOL(sn_bus_free_sysdata); +EXPORT_SYMBOL(sn_generate_path); diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/kernel/iomv.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux-xen/sn/kernel/iomv.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,76 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#include <linux/module.h> +#include <asm/io.h> +#include <asm/delay.h> +#include <asm/vga.h> +#include <asm/sn/nodepda.h> +#include <asm/sn/simulator.h> +#include <asm/sn/pda.h> +#include <asm/sn/sn_cpuid.h> +#include <asm/sn/shub_mmr.h> + +#define IS_LEGACY_VGA_IOPORT(p) \ + (((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df)) + +/** + * sn_io_addr - convert an in/out port to an i/o address + * @port: port to convert + * + * Legacy in/out instructions are converted to ld/st instructions + * on IA64. This routine will convert a port number into a valid + * SN i/o address. Used by sn_in*() and sn_out*(). + */ +void *sn_io_addr(unsigned long port) +{ + if (!IS_RUNNING_ON_SIMULATOR()) { + if (IS_LEGACY_VGA_IOPORT(port)) + port += vga_console_iobase; + /* On sn2, legacy I/O ports don't point at anything */ + if (port < (64 * 1024)) + return NULL; + return ((void *)(port | __IA64_UNCACHED_OFFSET)); + } else { + /* but the simulator uses them... */ + unsigned long addr; + + /* + * word align port, but need more than 10 bits + * for accessing registers in bedrock local block + * (so we don't do port&0xfff) + */ + addr = (is_shub2() ? 0xc00000028c000000UL : 0xc0000087cc000000UL) | ((port >> 2) << 12); + if ((port >= 0x1f0 && port <= 0x1f7) || port == 0x3f6 || port == 0x3f7) + addr |= port; + return (void *)addr; + } +} + +EXPORT_SYMBOL(sn_io_addr); + +/** + * __sn_mmiowb - I/O space memory barrier + * + * See include/asm-ia64/io.h and Documentation/DocBook/deviceiobook.tmpl + * for details. + * + * On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear. + * See PV 871084 for details about the WAR about zero value. + * + */ +void __sn_mmiowb(void) +{ + volatile unsigned long *adr = pda->pio_write_status_addr; + unsigned long val = pda->pio_write_status_val; + + while ((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != val) + cpu_relax(); +} + +EXPORT_SYMBOL(__sn_mmiowb); diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/kernel/irq.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux-xen/sn/kernel/irq.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,444 @@ +/* + * Platform dependent support for SGI SN + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved. 
+ */ + +#include <linux/irq.h> +#include <linux/spinlock.h> +#include <linux/init.h> +#include <asm/sn/addrs.h> +#include <asm/sn/arch.h> +#include <asm/sn/intr.h> +#include <asm/sn/pcibr_provider.h> +#include <asm/sn/pcibus_provider_defs.h> +#include <asm/sn/pcidev.h> +#include <asm/sn/shub_mmr.h> +#include <asm/sn/sn_sal.h> + +static void force_interrupt(int irq); +static void register_intr_pda(struct sn_irq_info *sn_irq_info); +static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); + +int sn_force_interrupt_flag = 1; +extern int sn_ioif_inited; +struct list_head **sn_irq_lh; +static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ + +u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, + struct sn_irq_info *sn_irq_info, + int req_irq, nasid_t req_nasid, + int req_slice) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, + (u64) SAL_INTR_ALLOC, (u64) local_nasid, + (u64) local_widget, __pa(sn_irq_info), (u64) req_irq, + (u64) req_nasid, (u64) req_slice); + + return ret_stuff.status; +} + +void sn_intr_free(nasid_t local_nasid, int local_widget, + struct sn_irq_info *sn_irq_info) +{ + struct ia64_sal_retval ret_stuff; + ret_stuff.status = 0; + ret_stuff.v0 = 0; + + SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, + (u64) SAL_INTR_FREE, (u64) local_nasid, + (u64) local_widget, (u64) sn_irq_info->irq_irq, + (u64) sn_irq_info->irq_cookie, 0, 0); +} + +static unsigned int sn_startup_irq(unsigned int irq) +{ + return 0; +} + +static void sn_shutdown_irq(unsigned int irq) +{ +} + +static void sn_disable_irq(unsigned int irq) +{ +} + +static void sn_enable_irq(unsigned int irq) +{ +} + +static void sn_ack_irq(unsigned int irq) +{ + u64 event_occurred, mask; + + irq = irq & 0xff; + event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); + mask = event_occurred & SH_ALL_INT_MASK; + HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask); + __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); + + move_native_irq(irq); +} + +static void sn_end_irq(unsigned int irq) +{ + int ivec; + u64 event_occurred; + + ivec = irq & 0xff; + if (ivec == SGI_UART_VECTOR) { + event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED)); + /* If the UART bit is set here, we may have received an + * interrupt from the UART that the driver missed. To + * make sure, we IPI ourselves to force us to look again. 
+ */ + if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { + platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, + IA64_IPI_DM_INT, 0); + } + } + __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs); + if (sn_force_interrupt_flag) + force_interrupt(irq); +} + +static void sn_irq_info_free(struct rcu_head *head); + +struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, + nasid_t nasid, int slice) +{ + int vector; + int cpuphys; + int64_t bridge; + int local_widget, status; + nasid_t local_nasid; + struct sn_irq_info *new_irq_info; + struct sn_pcibus_provider *pci_provider; + + new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); + if (new_irq_info == NULL) + return NULL; + + memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); + + bridge = (u64) new_irq_info->irq_bridge; + if (!bridge) { + kfree(new_irq_info); + return NULL; /* irq is not a device interrupt */ + } + + local_nasid = NASID_GET(bridge); + + if (local_nasid & 1) + local_widget = TIO_SWIN_WIDGETNUM(bridge); + else + local_widget = SWIN_WIDGETNUM(bridge); + + vector = sn_irq_info->irq_irq; + /* Free the old PROM new_irq_info structure */ + sn_intr_free(local_nasid, local_widget, new_irq_info); + /* Update kernels new_irq_info with new target info */ + unregister_intr_pda(new_irq_info); + + /* allocate a new PROM new_irq_info struct */ + status = sn_intr_alloc(local_nasid, local_widget, + new_irq_info, vector, + nasid, slice); + + /* SAL call failed */ + if (status) { + kfree(new_irq_info); + return NULL; + } + + cpuphys = nasid_slice_to_cpuid(nasid, slice); + new_irq_info->irq_cpuid = cpuphys; + register_intr_pda(new_irq_info); + + pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; + + /* + * If this represents a line interrupt, target it. If it's + * an msi (irq_int_bit < 0), it's already targeted. 
+ */ + if (new_irq_info->irq_int_bit >= 0 && + pci_provider && pci_provider->target_interrupt) + (pci_provider->target_interrupt)(new_irq_info); + + spin_lock(&sn_irq_info_lock); + list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); + spin_unlock(&sn_irq_info_lock); + call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + +#ifdef CONFIG_SMP + set_irq_affinity_info((vector & 0xff), cpuphys, 0); +#endif + + return new_irq_info; +} + +static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask) +{ + struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; + nasid_t nasid; + int slice; + + nasid = cpuid_to_nasid(first_cpu(mask)); + slice = cpuid_to_slice(first_cpu(mask)); + + list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, + sn_irq_lh[irq], list) + (void)sn_retarget_vector(sn_irq_info, nasid, slice); +} + +struct hw_interrupt_type irq_type_sn = { + .name = "SN hub", + .startup = sn_startup_irq, + .shutdown = sn_shutdown_irq, + .enable = sn_enable_irq, + .disable = sn_disable_irq, + .ack = sn_ack_irq, + .end = sn_end_irq, + .set_affinity = sn_set_affinity_irq +}; + +unsigned int sn_local_vector_to_irq(u8 vector) +{ + return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector)); +} + +void sn_irq_init(void) +{ + int i; + irq_desc_t *base_desc = irq_desc; + + ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; + ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; + + for (i = 0; i < NR_IRQS; i++) { + if (base_desc[i].chip == &no_irq_type) { + base_desc[i].chip = &irq_type_sn; + } + } +} + +static void register_intr_pda(struct sn_irq_info *sn_irq_info) +{ + int irq = sn_irq_info->irq_irq; + int cpu = sn_irq_info->irq_cpuid; + + if (pdacpu(cpu)->sn_last_irq < irq) { + pdacpu(cpu)->sn_last_irq = irq; + } + + if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) + pdacpu(cpu)->sn_first_irq = irq; +} + +static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) +{ + int irq = sn_irq_info->irq_irq; + int cpu = sn_irq_info->irq_cpuid; + struct sn_irq_info *tmp_irq_info; + int i, foundmatch; + + rcu_read_lock(); + if (pdacpu(cpu)->sn_last_irq == irq) { + foundmatch = 0; + for (i = pdacpu(cpu)->sn_last_irq - 1; + i && !foundmatch; i--) { + list_for_each_entry_rcu(tmp_irq_info, + sn_irq_lh[i], + list) { + if (tmp_irq_info->irq_cpuid == cpu) { + foundmatch = 1; + break; + } + } + } + pdacpu(cpu)->sn_last_irq = i; + } + + if (pdacpu(cpu)->sn_first_irq == irq) { + foundmatch = 0; + for (i = pdacpu(cpu)->sn_first_irq + 1; + i < NR_IRQS && !foundmatch; i++) { + list_for_each_entry_rcu(tmp_irq_info, + sn_irq_lh[i], + list) { + if (tmp_irq_info->irq_cpuid == cpu) { + foundmatch = 1; + break; + } + } + } + pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 
0 : i); + } + rcu_read_unlock(); +} + +static void sn_irq_info_free(struct rcu_head *head) +{ + struct sn_irq_info *sn_irq_info; + + sn_irq_info = container_of(head, struct sn_irq_info, rcu); + kfree(sn_irq_info); +} + +void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) +{ + nasid_t nasid = sn_irq_info->irq_nasid; + int slice = sn_irq_info->irq_slice; + int cpu = nasid_slice_to_cpuid(nasid, slice); + + pci_dev_get(pci_dev); + sn_irq_info->irq_cpuid = cpu; + sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); + + /* link it into the sn_irq[irq] list */ + spin_lock(&sn_irq_info_lock); + list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); + reserve_irq_vector(sn_irq_info->irq_irq); + spin_unlock(&sn_irq_info_lock); + + register_intr_pda(sn_irq_info); +} + +void sn_irq_unfixup(struct pci_dev *pci_dev) +{ + struct sn_irq_info *sn_irq_info; + + /* Only cleanup IRQ stuff if this device has a host bus context */ + if (!SN_PCIDEV_BUSSOFT(pci_dev)) + return; + + sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info; + if (!sn_irq_info) + return; + if (!sn_irq_info->irq_irq) { + kfree(sn_irq_info); + return; + } + + unregister_intr_pda(sn_irq_info); + spin_lock(&sn_irq_info_lock); + list_del_rcu(&sn_irq_info->list); + spin_unlock(&sn_irq_info_lock); + if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) + free_irq_vector(sn_irq_info->irq_irq); + call_rcu(&sn_irq_info->rcu, sn_irq_info_free); + pci_dev_put(pci_dev); + +} + +static inline void +sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info) +{ + struct sn_pcibus_provider *pci_provider; + + pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; + if (pci_provider && pci_provider->force_interrupt) + (*pci_provider->force_interrupt)(sn_irq_info); +} + +static void force_interrupt(int irq) +{ + struct sn_irq_info *sn_irq_info; + + if (!sn_ioif_inited) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) + sn_call_force_intr_provider(sn_irq_info); + + rcu_read_unlock(); +} + +/* + * Check for lost interrupts. If the PIC int_status reg. says that + * an interrupt has been sent, but not handled, and the interrupt + * is not pending in either the cpu irr regs or in the soft irr regs, + * and the interrupt is not in service, then the interrupt may have + * been lost. Force an interrupt on that pin. It is possible that + * the interrupt is in flight, so we may generate a spurious interrupt, + * but we should never miss a real lost interrupt. + */ +static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) +{ + u64 regval; + struct pcidev_info *pcidev_info; + struct pcibus_info *pcibus_info; + + /* + * Bridge types attached to TIO (anything but PIC) do not need this WAR + * since they do not target Shub II interrupt registers. If that + * ever changes, this check needs to accomodate. 
+ */ + if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC) + return; + + pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; + if (!pcidev_info) + return; + + pcibus_info = + (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info-> + pdi_pcibus_info; + regval = pcireg_intr_status_get(pcibus_info); + + if (!ia64_get_irr(irq_to_vector(irq))) { + if (!test_bit(irq, pda->sn_in_service_ivecs)) { + regval &= 0xff; + if (sn_irq_info->irq_int_bit & regval & + sn_irq_info->irq_last_intr) { + regval &= ~(sn_irq_info->irq_int_bit & regval); + sn_call_force_intr_provider(sn_irq_info); + } + } + } + sn_irq_info->irq_last_intr = regval; +} + +void sn_lb_int_war_check(void) +{ + struct sn_irq_info *sn_irq_info; + int i; + + if (!sn_ioif_inited || pda->sn_first_irq == 0) + return; + + rcu_read_lock(); + for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { + list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { + sn_check_intr(i, sn_irq_info); + } + } + rcu_read_unlock(); +} + +void __init sn_irq_lh_init(void) +{ + int i; + + sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL); + if (!sn_irq_lh) + panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); + + for (i = 0; i < NR_IRQS; i++) { + sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL); + if (!sn_irq_lh[i]) + panic("SN PCI INIT: Failed IRQ memory allocation\n"); + + INIT_LIST_HEAD(sn_irq_lh[i]); + } +} diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/kernel/setup.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux-xen/sn/kernel/setup.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,755 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/kdev_t.h> +#include <linux/string.h> +#include <linux/screen_info.h> +#include <linux/console.h> +#include <linux/timex.h> +#include <linux/sched.h> +#include <linux/ioport.h> +#include <linux/mm.h> +#include <linux/serial.h> +#include <linux/irq.h> +#include <linux/bootmem.h> +#include <linux/mmzone.h> +#include <linux/interrupt.h> +#include <linux/acpi.h> +#include <linux/compiler.h> +#include <linux/sched.h> +#include <linux/root_dev.h> +#include <linux/nodemask.h> +#include <linux/pm.h> +#include <linux/efi.h> + +#include <asm/io.h> +#include <asm/sal.h> +#include <asm/machvec.h> +#include <asm/system.h> +#include <asm/processor.h> +#include <asm/vga.h> +#include <asm/sn/arch.h> +#include <asm/sn/addrs.h> +#include <asm/sn/pda.h> +#include <asm/sn/nodepda.h> +#include <asm/sn/sn_cpuid.h> +#include <asm/sn/simulator.h> +#include <asm/sn/leds.h> +#include <asm/sn/bte.h> +#include <asm/sn/shub_mmr.h> +#include <asm/sn/clksupport.h> +#include <asm/sn/sn_sal.h> +#include <asm/sn/geo.h> +#include <asm/sn/sn_feature_sets.h> +#include "xtalk/xwidgetdev.h" +#include "xtalk/hubdev.h" +#include <asm/sn/klconfig.h> + + +DEFINE_PER_CPU(struct pda_s, pda_percpu); + +#define MAX_PHYS_MEMORY (1UL << IA64_MAX_PHYS_BITS) /* Max physical address supported */ + +extern void bte_init_node(nodepda_t *, cnodeid_t); + +extern void sn_timer_init(void); +extern unsigned long last_time_offset; +extern void (*ia64_mark_idle) (int); +extern void snidle(int); +extern unsigned long long (*ia64_printk_clock)(void); + +unsigned long sn_rtc_cycles_per_second; +EXPORT_SYMBOL(sn_rtc_cycles_per_second); + +DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); +EXPORT_PER_CPU_SYMBOL(__sn_hub_info); + +DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); +EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid); + +DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda); +EXPORT_PER_CPU_SYMBOL(__sn_nodepda); + +char sn_system_serial_number_string[128]; +EXPORT_SYMBOL(sn_system_serial_number_string); +u64 sn_partition_serial_number; +EXPORT_SYMBOL(sn_partition_serial_number); +u8 sn_partition_id; +EXPORT_SYMBOL(sn_partition_id); +u8 sn_system_size; +EXPORT_SYMBOL(sn_system_size); +u8 sn_sharing_domain_size; +EXPORT_SYMBOL(sn_sharing_domain_size); +u8 sn_coherency_id; +EXPORT_SYMBOL(sn_coherency_id); +u8 sn_region_size; +EXPORT_SYMBOL(sn_region_size); +int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */ + +short physical_node_map[MAX_NUMALINK_NODES]; +static unsigned long sn_prom_features[MAX_PROM_FEATURE_SETS]; + +EXPORT_SYMBOL(physical_node_map); + +int num_cnodes; + +static void sn_init_pdas(char **); +static void build_cnode_tables(void); + +static nodepda_t *nodepdaindr[MAX_COMPACT_NODES]; + +/* + * The format of "screen_info" is strange, and due to early i386-setup + * code. This is just enough to make the console code think we're on a + * VGA color display. + */ +struct screen_info sn_screen_info = { + .orig_x = 0, + .orig_y = 0, + .orig_video_mode = 3, + .orig_video_cols = 80, + .orig_video_ega_bx = 3, + .orig_video_lines = 25, + .orig_video_isVGA = 1, + .orig_video_points = 16 +}; + +/* + * This routine can only be used during init, since + * smp_boot_data is an init data structure. + * We have to use smp_boot_data.cpu_phys_id to find + * the physical id of the processor because the normal + * cpu_physical_id() relies on data structures that + * may not be initialized yet. 
+ */ + +static int __init pxm_to_nasid(int pxm) +{ + int i; + int nid; + + nid = pxm_to_node(pxm); + for (i = 0; i < num_node_memblks; i++) { + if (node_memblk[i].nid == nid) { + return NASID_GET(node_memblk[i].start_paddr); + } + } + return -1; +} + +/** + * early_sn_setup - early setup routine for SN platforms + * + * Sets up an initial console to aid debugging. Intended primarily + * for bringup. See start_kernel() in init/main.c. + */ + +void __init early_sn_setup(void) +{ + efi_system_table_t *efi_systab; + efi_config_table_t *config_tables; + struct ia64_sal_systab *sal_systab; + struct ia64_sal_desc_entry_point *ep; + char *p; + int i, j; + + /* + * Parse enough of the SAL tables to locate the SAL entry point. Since, console + * IO on SN2 is done via SAL calls, early_printk won't work without this. + * + * This code duplicates some of the ACPI table parsing that is in efi.c & sal.c. + * Any changes to those file may have to be made hereas well. + */ + efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab); + config_tables = __va(efi_systab->tables); + for (i = 0; i < efi_systab->nr_tables; i++) { + if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == + 0) { + sal_systab = __va(config_tables[i].table); + p = (char *)(sal_systab + 1); + for (j = 0; j < sal_systab->entry_count; j++) { + if (*p == SAL_DESC_ENTRY_POINT) { + ep = (struct ia64_sal_desc_entry_point + *)p; + ia64_sal_handler_init(__va + (ep->sal_proc), + __va(ep->gp)); + return; + } + p += SAL_DESC_SIZE(*p); + } + } + } + /* Uh-oh, SAL not available?? */ + printk(KERN_ERR "failed to find SAL entry point\n"); +} + +extern int platform_intr_list[]; +static int __initdata shub_1_1_found; + +/* + * sn_check_for_wars + * + * Set flag for enabling shub specific wars + */ + +static inline int __init is_shub_1_1(int nasid) +{ + unsigned long id; + int rev; + + if (is_shub2()) + return 0; + id = REMOTE_HUB_L(nasid, SH1_SHUB_ID); + rev = (id & SH1_SHUB_ID_REVISION_MASK) >> SH1_SHUB_ID_REVISION_SHFT; + return rev <= 2; +} + +static void __init sn_check_for_wars(void) +{ + int cnode; + + if (is_shub2()) { + /* none yet */ + } else { + for_each_online_node(cnode) { + if (is_shub_1_1(cnodeid_to_nasid(cnode))) + shub_1_1_found = 1; + } + } +} + +/* + * Scan the EFI PCDP table (if it exists) for an acceptable VGA console + * output device. If one exists, pick it and set sn_legacy_{io,mem} to + * reflect the bus offsets needed to address it. + * + * Since pcdp support in SN is not supported in the 2.4 kernel (or at least + * the one lbs is based on) just declare the needed structs here. + * + * Reference spec http://www.dig64.org/specifications/DIG64_PCDPv20.pdf + * + * Returns 0 if no acceptable vga is found, !0 otherwise. + * + * Note: This stuff is duped here because Altix requires the PCDP to + * locate a usable VGA device due to lack of proper ACPI support. Structures + * could be used from drivers/firmware/pcdp.h, but it was decided that moving + * this file to a more public location just for Altix use was undesireable. 
+ */ + +struct hcdp_uart_desc { + u8 pad[45]; +}; + +struct pcdp { + u8 signature[4]; /* should be 'HCDP' */ + u32 length; + u8 rev; /* should be >=3 for pcdp, <3 for hcdp */ + u8 sum; + u8 oem_id[6]; + u64 oem_tableid; + u32 oem_rev; + u32 creator_id; + u32 creator_rev; + u32 num_type0; + struct hcdp_uart_desc uart[0]; /* num_type0 of these */ + /* pcdp descriptors follow */ +} __attribute__((packed)); + +struct pcdp_device_desc { + u8 type; + u8 primary; + u16 length; + u16 index; + /* interconnect specific structure follows */ + /* device specific structure follows that */ +} __attribute__((packed)); + +struct pcdp_interface_pci { + u8 type; /* 1 == pci */ + u8 reserved; + u16 length; + u8 segment; + u8 bus; + u8 dev; + u8 fun; + u16 devid; + u16 vendid; + u32 acpi_interrupt; + u64 mmio_tra; + u64 ioport_tra; + u8 flags; + u8 translation; +} __attribute__((packed)); + +struct pcdp_vga_device { + u8 num_eas_desc; + /* ACPI Extended Address Space Desc follows */ +} __attribute__((packed)); + +/* from pcdp_device_desc.primary */ +#define PCDP_PRIMARY_CONSOLE 0x01 + +/* from pcdp_device_desc.type */ +#define PCDP_CONSOLE_INOUT 0x0 +#define PCDP_CONSOLE_DEBUG 0x1 +#define PCDP_CONSOLE_OUT 0x2 +#define PCDP_CONSOLE_IN 0x3 +#define PCDP_CONSOLE_TYPE_VGA 0x8 + +#define PCDP_CONSOLE_VGA (PCDP_CONSOLE_TYPE_VGA | PCDP_CONSOLE_OUT) + +/* from pcdp_interface_pci.type */ +#define PCDP_IF_PCI 1 + +/* from pcdp_interface_pci.translation */ +#define PCDP_PCI_TRANS_IOPORT 0x02 +#define PCDP_PCI_TRANS_MMIO 0x01 + +#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) +static void +sn_scan_pcdp(void) +{ + u8 *bp; + struct pcdp *pcdp; + struct pcdp_device_desc device; + struct pcdp_interface_pci if_pci; + extern struct efi efi; + + if (efi.hcdp == EFI_INVALID_TABLE_ADDR) + return; /* no hcdp/pcdp table */ + + pcdp = __va(efi.hcdp); + + if (pcdp->rev < 3) + return; /* only support PCDP (rev >= 3) */ + + for (bp = (u8 *)&pcdp->uart[pcdp->num_type0]; + bp < (u8 *)pcdp + pcdp->length; + bp += device.length) { + memcpy(&device, bp, sizeof(device)); + if (! (device.primary & PCDP_PRIMARY_CONSOLE)) + continue; /* not primary console */ + + if (device.type != PCDP_CONSOLE_VGA) + continue; /* not VGA descriptor */ + + memcpy(&if_pci, bp+sizeof(device), sizeof(if_pci)); + if (if_pci.type != PCDP_IF_PCI) + continue; /* not PCI interconnect */ + + if (if_pci.translation & PCDP_PCI_TRANS_IOPORT) + vga_console_iobase = + if_pci.ioport_tra | __IA64_UNCACHED_OFFSET; + + if (if_pci.translation & PCDP_PCI_TRANS_MMIO) + vga_console_membase = + if_pci.mmio_tra | __IA64_UNCACHED_OFFSET; + + break; /* once we find the primary, we're done */ + } +} +#endif + +static unsigned long sn2_rtc_initial; + +static unsigned long long ia64_sn2_printk_clock(void) +{ + unsigned long rtc_now = rtc_time(); + + return (rtc_now - sn2_rtc_initial) * + (1000000000 / sn_rtc_cycles_per_second); +} + +/** + * sn_setup - SN platform setup routine + * @cmdline_p: kernel command line + * + * Handles platform setup for SN machines. This includes determining + * the RTC frequency (via a SAL call), initializing secondary CPUs, and + * setting up per-node data areas. The console is also initialized here. 
+ */ +void __init sn_setup(char **cmdline_p) +{ + long status, ticks_per_sec, drift; + u32 version = sn_sal_rev(); + extern void sn_cpu_init(void); + + sn2_rtc_initial = rtc_time(); + ia64_sn_plat_set_error_handling_features(); // obsolete + ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV); + ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES); + + +#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) + /* + * Handle SN vga console. + * + * SN systems do not have enough ACPI table information + * being passed from prom to identify VGA adapters and the legacy + * addresses to access them. Until that is done, SN systems rely + * on the PCDP table to identify the primary VGA console if one + * exists. + * + * However, kernel PCDP support is optional, and even if it is built + * into the kernel, it will not be used if the boot cmdline contains + * console= directives. + * + * So, to work around this mess, we duplicate some of the PCDP code + * here so that the primary VGA console (as defined by PCDP) will + * work on SN systems even if a different console (e.g. serial) is + * selected on the boot line (or CONFIG_EFI_PCDP is off). + */ + + if (! vga_console_membase) + sn_scan_pcdp(); + + if (vga_console_membase) { + /* usable vga ... make tty0 the preferred default console */ + if (!strstr(*cmdline_p, "console=")) + add_preferred_console("tty", 0, NULL); + } else { + printk(KERN_DEBUG "SGI: Disabling VGA console\n"); + if (!strstr(*cmdline_p, "console=")) + add_preferred_console("ttySG", 0, NULL); +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#else + conswitchp = NULL; +#endif /* CONFIG_DUMMY_CONSOLE */ + } +#endif /* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */ + + MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY; + + /* + * Build the tables for managing cnodes. + */ + build_cnode_tables(); + + status = + ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec, + &drift); + if (status != 0 || ticks_per_sec < 100000) { + printk(KERN_WARNING + "unable to determine platform RTC clock frequency, guessing.\n"); + /* PROM gives wrong value for clock freq. so guess */ + sn_rtc_cycles_per_second = 1000000000000UL / 30000UL; + } else + sn_rtc_cycles_per_second = ticks_per_sec; + + platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR; + + ia64_printk_clock = ia64_sn2_printk_clock; + + printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); + + /* + * we set the default root device to /dev/hda + * to make simulation easy + */ + ROOT_DEV = Root_HDA1; + + /* + * Create the PDAs and NODEPDAs for all the cpus. + */ + sn_init_pdas(cmdline_p); + + ia64_mark_idle = &snidle; + + /* + * For the bootcpu, we do this here. All other cpus will make the + * call as part of cpu_init in slave cpu initialization. + */ + sn_cpu_init(); + +#ifdef CONFIG_SMP + init_smp_config(); +#endif + screen_info = sn_screen_info; + + sn_timer_init(); + + /* + * set pm_power_off to a SAL call to allow + * sn machines to power off. The SAL call can be replaced + * by an ACPI interface call when ACPI is fully implemented + * for sn. + */ + pm_power_off = ia64_sn_power_down; + current->thread.flags |= IA64_THREAD_MIGRATION; +} + +/** + * sn_init_pdas - setup node data areas + * + * One time setup for Node Data Area. Called by sn_setup(). + */ +static void __init sn_init_pdas(char **cmdline_p) +{ + cnodeid_t cnode; + + /* + * Allocate & initalize the nodepda for each node. 
+ */ + for_each_online_node(cnode) { + nodepdaindr[cnode] = + alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t)); + memset(nodepdaindr[cnode], 0, sizeof(nodepda_t)); + memset(nodepdaindr[cnode]->phys_cpuid, -1, + sizeof(nodepdaindr[cnode]->phys_cpuid)); + spin_lock_init(&nodepdaindr[cnode]->ptc_lock); + } + + /* + * Allocate & initialize nodepda for TIOs. For now, put them on node 0. + */ + for (cnode = num_online_nodes(); cnode < num_cnodes; cnode++) { + nodepdaindr[cnode] = + alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t)); + memset(nodepdaindr[cnode], 0, sizeof(nodepda_t)); + } + + /* + * Now copy the array of nodepda pointers to each nodepda. + */ + for (cnode = 0; cnode < num_cnodes; cnode++) + memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr, + sizeof(nodepdaindr)); + + /* + * Set up IO related platform-dependent nodepda fields. + * The following routine actually sets up the hubinfo struct + * in nodepda. + */ + for_each_online_node(cnode) { + bte_init_node(nodepdaindr[cnode], cnode); + } + + /* + * Initialize the per node hubdev. This includes IO Nodes and + * headless/memless nodes. + */ + for (cnode = 0; cnode < num_cnodes; cnode++) { + hubdev_init_node(nodepdaindr[cnode], cnode); + } +} + +/** + * sn_cpu_init - initialize per-cpu data areas + * @cpuid: cpuid of the caller + * + * Called during cpu initialization on each cpu as it starts. + * Currently, initializes the per-cpu data area for SNIA. + * Also sets up a few fields in the nodepda. Also known as + * platform_cpu_init() by the ia64 machvec code. + */ +void __cpuinit sn_cpu_init(void) +{ + int cpuid; + int cpuphyid; + int nasid; + int subnode; + int slice; + int cnode; + int i; + static int wars_have_been_checked; + + cpuid = smp_processor_id(); + if (cpuid == 0 && IS_MEDUSA()) { + if (ia64_sn_is_fake_prom()) + sn_prom_type = 2; + else + sn_prom_type = 1; + printk(KERN_INFO "Running on medusa with %s PROM\n", + (sn_prom_type == 1) ? "real" : "fake"); + } + + memset(pda, 0, sizeof(pda)); + if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, + &sn_hub_info->nasid_bitmask, + &sn_hub_info->nasid_shift, + &sn_system_size, &sn_sharing_domain_size, + &sn_partition_id, &sn_coherency_id, + &sn_region_size)) + BUG(); + sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2; + + /* + * Don't check status. The SAL call is not supported on all PROMs + * but a failure is harmless. + */ + (void) ia64_sn_set_cpu_number(cpuid); + + /* + * The boot cpu makes this call again after platform initialization is + * complete. + */ + if (nodepdaindr[0] == NULL) + return; + + for (i = 0; i < MAX_PROM_FEATURE_SETS; i++) + if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0) + break; + + cpuphyid = get_sapicid(); + + if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice)) + BUG(); + + for (i=0; i < MAX_NUMNODES; i++) { + if (nodepdaindr[i]) { + nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid; + nodepdaindr[i]->phys_cpuid[cpuid].slice = slice; + nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode; + } + } + + cnode = nasid_to_cnodeid(nasid); + + sn_nodepda = nodepdaindr[cnode]; + + pda->led_address = + (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT)); + pda->led_state = LED_ALWAYS_SET; + pda->hb_count = HZ / 2; + pda->hb_state = 0; + pda->idle_flag = 0; + + if (cpuid != 0) { + /* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */ + memcpy(sn_cnodeid_to_nasid, + (&per_cpu(__sn_cnodeid_to_nasid, 0)), + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); + } + + /* + * Check for WARs. 
+ * Only needs to be done once, on BSP. + * Has to be done after loop above, because it uses this cpu's + * sn_cnodeid_to_nasid table which was just initialized if this + * isn't cpu 0. + * Has to be done before assignment below. + */ + if (!wars_have_been_checked) { + sn_check_for_wars(); + wars_have_been_checked = 1; + } + sn_hub_info->shub_1_1_found = shub_1_1_found; + + /* + * Set up addresses of PIO/MEM write status registers. + */ + { + u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0}; + u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2, + SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3}; + u64 *pio; + pio = is_shub1() ? pio1 : pio2; + pda->pio_write_status_addr = + (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]); + pda->pio_write_status_val = is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0; + } + + /* + * WAR addresses for SHUB 1.x. + */ + if (local_node_data->active_cpu_count++ == 0 && is_shub1()) { + int buddy_nasid; + buddy_nasid = + cnodeid_to_nasid(numa_node_id() == + num_online_nodes() - 1 ? 0 : numa_node_id() + 1); + pda->pio_shub_war_cam_addr = + (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, + SH1_PI_CAM_CONTROL); + } +} + +/* + * Build tables for converting between NASIDs and cnodes. + */ +static inline int __init board_needs_cnode(int type) +{ + return (type == KLTYPE_SNIA || type == KLTYPE_TIO); +} + +void __init build_cnode_tables(void) +{ + int nasid; + int node; + lboard_t *brd; + + memset(physical_node_map, -1, sizeof(physical_node_map)); + memset(sn_cnodeid_to_nasid, -1, + sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid))); + + /* + * First populate the tables with C/M bricks. This ensures that + * cnode == node for all C & M bricks. + */ + for_each_online_node(node) { + nasid = pxm_to_nasid(node_to_pxm(node)); + sn_cnodeid_to_nasid[node] = nasid; + physical_node_map[nasid] = node; + } + + /* + * num_cnodes is total number of C/M/TIO bricks. Because of the 256 node + * limit on the number of nodes, we can't use the generic node numbers + * for this. Note that num_cnodes is incremented below as TIOs or + * headless/memoryless nodes are discovered. 
+ */ + num_cnodes = num_online_nodes(); + + /* fakeprom does not support klgraph */ + if (IS_RUNNING_ON_FAKE_PROM()) + return; + + /* Find TIOs & headless/memoryless nodes and add them to the tables */ + for_each_online_node(node) { + kl_config_hdr_t *klgraph_header; + nasid = cnodeid_to_nasid(node); + klgraph_header = ia64_sn_get_klconfig_addr(nasid); + if (klgraph_header == NULL) + BUG(); + brd = NODE_OFFSET_TO_LBOARD(nasid, klgraph_header->ch_board_info); + while (brd) { + if (board_needs_cnode(brd->brd_type) && physical_node_map[brd->brd_nasid] < 0) { + sn_cnodeid_to_nasid[num_cnodes] = brd->brd_nasid; + physical_node_map[brd->brd_nasid] = num_cnodes++; + } + brd = find_lboard_next(brd); + } + } +} + +int +nasid_slice_to_cpuid(int nasid, int slice) +{ + long cpu; + + for (cpu = 0; cpu < NR_CPUS; cpu++) + if (cpuid_to_nasid(cpu) == nasid && + cpuid_to_slice(cpu) == slice) + return cpu; + + return -1; +} + +int sn_prom_feature_available(int id) +{ + if (id >= BITS_PER_LONG * MAX_PROM_FEATURE_SETS) + return 0; + return test_bit(id, sn_prom_features); +} +EXPORT_SYMBOL(sn_prom_feature_available); + diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,489 @@ +/* + * SN2 Platform specific SMP Support + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000-2006 Silicon Graphics, Inc. All rights reserved. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/threads.h> +#include <linux/sched.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/mmzone.h> +#include <linux/module.h> +#include <linux/bitops.h> +#include <linux/nodemask.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +#include <asm/processor.h> +#include <asm/irq.h> +#include <asm/sal.h> +#include <asm/system.h> +#include <asm/delay.h> +#include <asm/io.h> +#include <asm/smp.h> +#include <asm/tlb.h> +#include <asm/numa.h> +#include <asm/hw_irq.h> +#include <asm/current.h> +#include <asm/sn/sn_cpuid.h> +#include <asm/sn/sn_sal.h> +#include <asm/sn/addrs.h> +#include <asm/sn/shub_mmr.h> +#include <asm/sn/nodepda.h> +#include <asm/sn/rw_mmr.h> + +DEFINE_PER_CPU(struct ptc_stats, ptcstats); +DECLARE_PER_CPU(struct ptc_stats, ptcstats); + +static __cacheline_aligned DEFINE_SPINLOCK(sn2_global_ptc_lock); + +extern unsigned long +sn2_ptc_deadlock_recovery_core(volatile unsigned long *, unsigned long, + volatile unsigned long *, unsigned long, + volatile unsigned long *, unsigned long); +void +sn2_ptc_deadlock_recovery(short *, short, short, int, + volatile unsigned long *, unsigned long, + volatile unsigned long *, unsigned long); + +/* + * Note: some is the following is captured here to make degugging easier + * (the macros make more sense if you see the debug patch - not posted) + */ +#define sn2_ptctest 0 +#define local_node_uses_ptc_ga(sh1) ((sh1) ? 1 : 0) +#define max_active_pio(sh1) ((sh1) ? 32 : 7) +#define reset_max_active_on_deadlock() 1 +#define PTC_LOCK(sh1) ((sh1) ? 
&sn2_global_ptc_lock : &sn_nodepda->ptc_lock) + +struct ptc_stats { + unsigned long ptc_l; + unsigned long change_rid; + unsigned long shub_ptc_flushes; + unsigned long nodes_flushed; + unsigned long deadlocks; + unsigned long deadlocks2; + unsigned long lock_itc_clocks; + unsigned long shub_itc_clocks; + unsigned long shub_itc_clocks_max; + unsigned long shub_ptc_flushes_not_my_mm; +}; + +#define sn2_ptctest 0 + +static inline unsigned long wait_piowc(void) +{ + volatile unsigned long *piows; + unsigned long zeroval, ws; + + piows = pda->pio_write_status_addr; + zeroval = pda->pio_write_status_val; + do { + cpu_relax(); + } while (((ws = *piows) & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) != zeroval); + return (ws & SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK) != 0; +} + +/** + * sn_migrate - SN-specific task migration actions + * @task: Task being migrated to new CPU + * + * SN2 PIO writes from separate CPUs are not guaranteed to arrive in order. + * Context switching user threads which have memory-mapped MMIO may cause + * PIOs to issue from seperate CPUs, thus the PIO writes must be drained + * from the previous CPU's Shub before execution resumes on the new CPU. + */ +void sn_migrate(struct task_struct *task) +{ + pda_t *last_pda = pdacpu(task_thread_info(task)->last_cpu); + volatile unsigned long *adr = last_pda->pio_write_status_addr; + unsigned long val = last_pda->pio_write_status_val; + + /* Drain PIO writes from old CPU's Shub */ + while (unlikely((*adr & SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK) + != val)) + cpu_relax(); +} + +void sn_tlb_migrate_finish(struct mm_struct *mm) +{ + /* flush_tlb_mm is inefficient if more than 1 users of mm */ + if (mm == current->mm && mm && atomic_read(&mm->mm_users) == 1) + flush_tlb_mm(mm); +} + +/** + * sn2_global_tlb_purge - globally purge translation cache of virtual address range + * @mm: mm_struct containing virtual address range + * @start: start of virtual address range + * @end: end of virtual address range + * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc)) + * + * Purges the translation caches of all processors of the given virtual address + * range. + * + * Note: + * - cpu_vm_mask is a bit mask that indicates which cpus have loaded the context. + * - cpu_vm_mask is converted into a nodemask of the nodes containing the + * cpus in cpu_vm_mask. + * - if only one bit is set in cpu_vm_mask & it is the current cpu & the + * process is purging its own virtual address range, then only the + * local TLB needs to be flushed. This flushing can be done using + * ptc.l. This is the common case & avoids the global spinlock. + * - if multiple cpus have loaded the context, then flushing has to be + * done with ptc.g/MMRs under protection of the global ptc_lock. 
+ */ + +void +sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, + unsigned long end, unsigned long nbits) +{ + int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid; + int mymm = (mm == current->active_mm && mm == current->mm); + int use_cpu_ptcga; + volatile unsigned long *ptc0, *ptc1; + unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0; + short nasids[MAX_NUMNODES], nix; + nodemask_t nodes_flushed; + int active, max_active, deadlock; + + nodes_clear(nodes_flushed); + i = 0; + + for_each_cpu_mask(cpu, mm->cpu_vm_mask) { + cnode = cpu_to_node(cpu); + node_set(cnode, nodes_flushed); + lcpu = cpu; + i++; + } + + if (i == 0) + return; + + preempt_disable(); + + if (likely(i == 1 && lcpu == smp_processor_id() && mymm)) { + do { + ia64_ptcl(start, nbits << 2); + start += (1UL << nbits); + } while (start < end); + ia64_srlz_i(); + __get_cpu_var(ptcstats).ptc_l++; + preempt_enable(); + return; + } + + if (atomic_read(&mm->mm_users) == 1 && mymm) { + flush_tlb_mm(mm); + __get_cpu_var(ptcstats).change_rid++; + preempt_enable(); + return; + } + + itc = ia64_get_itc(); + nix = 0; + for_each_node_mask(cnode, nodes_flushed) + nasids[nix++] = cnodeid_to_nasid(cnode); + + rr_value = (mm->context << 3) | REGION_NUMBER(start); + + shub1 = is_shub1(); + if (shub1) { + data0 = (1UL << SH1_PTC_0_A_SHFT) | + (nbits << SH1_PTC_0_PS_SHFT) | + (rr_value << SH1_PTC_0_RID_SHFT) | + (1UL << SH1_PTC_0_START_SHFT); + ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_0); + ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH1_PTC_1); + } else { + data0 = (1UL << SH2_PTC_A_SHFT) | + (nbits << SH2_PTC_PS_SHFT) | + (1UL << SH2_PTC_START_SHFT); + ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH2_PTC + + (rr_value << SH2_PTC_RID_SHFT)); + ptc1 = NULL; + } + + + mynasid = get_nasid(); + use_cpu_ptcga = local_node_uses_ptc_ga(shub1); + max_active = max_active_pio(shub1); + + itc = ia64_get_itc(); + spin_lock_irqsave(PTC_LOCK(shub1), flags); + itc2 = ia64_get_itc(); + + __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc; + __get_cpu_var(ptcstats).shub_ptc_flushes++; + __get_cpu_var(ptcstats).nodes_flushed += nix; + if (!mymm) + __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++; + + if (use_cpu_ptcga && !mymm) { + old_rr = ia64_get_rr(start); + ia64_set_rr(start, (old_rr & 0xff) | (rr_value << 8)); + ia64_srlz_d(); + } + + wait_piowc(); + do { + if (shub1) + data1 = start | (1UL << SH1_PTC_1_START_SHFT); + else + data0 = (data0 & ~SH2_PTC_ADDR_MASK) | (start & SH2_PTC_ADDR_MASK); + deadlock = 0; + active = 0; + for (ibegin = 0, i = 0; i < nix; i++) { + nasid = nasids[i]; + if (use_cpu_ptcga && unlikely(nasid == mynasid)) { + ia64_ptcga(start, nbits << 2); + ia64_srlz_i(); + } else { + ptc0 = CHANGE_NASID(nasid, ptc0); + if (ptc1) + ptc1 = CHANGE_NASID(nasid, ptc1); + pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1); + active++; + } + if (active >= max_active || i == (nix - 1)) { + if ((deadlock = wait_piowc())) { + sn2_ptc_deadlock_recovery(nasids, ibegin, i, mynasid, ptc0, data0, ptc1, data1); + if (reset_max_active_on_deadlock()) + max_active = 1; + } + active = 0; + ibegin = i + 1; + } + } + start += (1UL << nbits); + } while (start < end); + + itc2 = ia64_get_itc() - itc2; + __get_cpu_var(ptcstats).shub_itc_clocks += itc2; + if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max) + __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2; + + if (old_rr) { + ia64_set_rr(start, old_rr); + ia64_srlz_d(); + } + + spin_unlock_irqrestore(PTC_LOCK(shub1), flags); + + preempt_enable(); +} + +/* + * 
sn2_ptc_deadlock_recovery + * + * Recover from PTC deadlocks conditions. Recovery requires stepping thru each + * TLB flush transaction. The recovery sequence is somewhat tricky & is + * coded in assembly language. + */ + +void +sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid, + volatile unsigned long *ptc0, unsigned long data0, + volatile unsigned long *ptc1, unsigned long data1) +{ + short nasid, i; + unsigned long *piows, zeroval, n; + + __get_cpu_var(ptcstats).deadlocks++; + + piows = (unsigned long *) pda->pio_write_status_addr; + zeroval = pda->pio_write_status_val; + + + for (i=ib; i <= ie; i++) { + nasid = nasids[i]; + if (local_node_uses_ptc_ga(is_shub1()) && nasid == mynasid) + continue; + ptc0 = CHANGE_NASID(nasid, ptc0); + if (ptc1) + ptc1 = CHANGE_NASID(nasid, ptc1); + + n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval); + __get_cpu_var(ptcstats).deadlocks2 += n; + } + +} + +/** + * sn_send_IPI_phys - send an IPI to a Nasid and slice + * @nasid: nasid to receive the interrupt (may be outside partition) + * @physid: physical cpuid to receive the interrupt. + * @vector: command to send + * @delivery_mode: delivery mechanism + * + * Sends an IPI (interprocessor interrupt) to the processor specified by + * @physid + * + * @delivery_mode can be one of the following + * + * %IA64_IPI_DM_INT - pend an interrupt + * %IA64_IPI_DM_PMI - pend a PMI + * %IA64_IPI_DM_NMI - pend an NMI + * %IA64_IPI_DM_INIT - pend an INIT interrupt + */ +void sn_send_IPI_phys(int nasid, long physid, int vector, int delivery_mode) +{ + long val; + unsigned long flags = 0; + volatile long *p; + + p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT); + val = (1UL << SH_IPI_INT_SEND_SHFT) | + (physid << SH_IPI_INT_PID_SHFT) | + ((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) | + ((long)vector << SH_IPI_INT_IDX_SHFT) | + (0x000feeUL << SH_IPI_INT_BASE_SHFT); + + mb(); + if (enable_shub_wars_1_1()) { + spin_lock_irqsave(&sn2_global_ptc_lock, flags); + } + pio_phys_write_mmr(p, val); + if (enable_shub_wars_1_1()) { + wait_piowc(); + spin_unlock_irqrestore(&sn2_global_ptc_lock, flags); + } + +} + +EXPORT_SYMBOL(sn_send_IPI_phys); + +/** + * sn2_send_IPI - send an IPI to a processor + * @cpuid: target of the IPI + * @vector: command to send + * @delivery_mode: delivery mechanism + * @redirect: redirect the IPI? + * + * Sends an IPI (InterProcessor Interrupt) to the processor specified by + * @cpuid. 
@vector specifies the command to send, while @delivery_mode can + * be one of the following + * + * %IA64_IPI_DM_INT - pend an interrupt + * %IA64_IPI_DM_PMI - pend a PMI + * %IA64_IPI_DM_NMI - pend an NMI + * %IA64_IPI_DM_INIT - pend an INIT interrupt + */ +void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect) +{ + long physid; + int nasid; + + physid = cpu_physical_id(cpuid); + nasid = cpuid_to_nasid(cpuid); + + /* the following is used only when starting cpus at boot time */ + if (unlikely(nasid == -1)) + ia64_sn_get_sapic_info(physid, &nasid, NULL, NULL); + + sn_send_IPI_phys(nasid, physid, vector, delivery_mode); +} + +#ifdef CONFIG_PROC_FS + +#define PTC_BASENAME "sgi_sn/ptc_statistics" + +static void *sn2_ptc_seq_start(struct seq_file *file, loff_t * offset) +{ + if (*offset < NR_CPUS) + return offset; + return NULL; +} + +static void *sn2_ptc_seq_next(struct seq_file *file, void *data, loff_t * offset) +{ + (*offset)++; + if (*offset < NR_CPUS) + return offset; + return NULL; +} + +static void sn2_ptc_seq_stop(struct seq_file *file, void *data) +{ +} + +static int sn2_ptc_seq_show(struct seq_file *file, void *data) +{ + struct ptc_stats *stat; + int cpu; + + cpu = *(loff_t *) data; + + if (!cpu) { + seq_printf(file, + "# cpu ptc_l newrid ptc_flushes nodes_flushed deadlocks lock_nsec shub_nsec shub_nsec_max not_my_mm deadlock2\n"); + seq_printf(file, "# ptctest %d\n", sn2_ptctest); + } + + if (cpu < NR_CPUS && cpu_online(cpu)) { + stat = &per_cpu(ptcstats, cpu); + seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l, + stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed, + stat->deadlocks, + 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, + 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec, + 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec, + stat->shub_ptc_flushes_not_my_mm, + stat->deadlocks2); + } + return 0; +} + +static struct seq_operations sn2_ptc_seq_ops = { + .start = sn2_ptc_seq_start, + .next = sn2_ptc_seq_next, + .stop = sn2_ptc_seq_stop, + .show = sn2_ptc_seq_show +}; + +static int sn2_ptc_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &sn2_ptc_seq_ops); +} + +static struct file_operations proc_sn2_ptc_operations = { + .open = sn2_ptc_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static struct proc_dir_entry *proc_sn2_ptc; + +static int __init sn2_ptc_init(void) +{ + if (!ia64_platform_is("sn2")) + return 0; + + if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) { + printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); + return -EINVAL; + } + proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations; + spin_lock_init(&sn2_global_ptc_lock); + return 0; +} + +static void __exit sn2_ptc_exit(void) +{ + remove_proc_entry(PTC_BASENAME, NULL); +} + +module_init(sn2_ptc_init); +module_exit(sn2_ptc_exit); +#endif /* CONFIG_PROC_FS */ + diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/Makefile --- a/xen/arch/ia64/linux/Makefile Tue Dec 12 15:25:02 2006 -0700 +++ b/xen/arch/ia64/linux/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -23,6 +23,7 @@ obj-y += __moddi3.o obj-y += __moddi3.o obj-y += __umoddi3.o obj-y += carta_random.o +obj-y += io.o ## variants of divide/modulo ## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib) diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/README.origin --- a/xen/arch/ia64/linux/README.origin Tue Dec 12 15:25:02 2006 
-0700 +++ b/xen/arch/ia64/linux/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -27,3 +27,6 @@ strlen.S -> linux/arch/ia64/lib/strlen. # The files below are from Linux-2.6.16.33 carta_random.S -> linux/arch/ia64/lib/carta_random.S + +# The files below are from Linux-2.6.19 +io.c -> linux/arch/ia64/lib/io.c diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/dig/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/dig/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,1 @@ +obj-y += machvec.o diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/dig/README.origin --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/dig/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,7 @@ +Source files in this directory are identical copies of linux-2.6.19 files: + +NOTE: DO NOT commit changes to these files! If a file +needs to be changed, move it to ../linux-xen and follow +the instructions in the README there. + +machvec.c -> linux/arch/ia64/dig/machvec.c diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/dig/machvec.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/dig/machvec.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,3 @@ +#define MACHVEC_PLATFORM_NAME dig +#define MACHVEC_PLATFORM_HEADER <asm/machvec_dig.h> +#include <asm/machvec_init.h> diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/hp/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/hp/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,1 @@ +subdir-y += zx1 diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/hp/zx1/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/hp/zx1/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,1 @@ +obj-y += hpzx1_machvec.o diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/hp/zx1/README.origin --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/hp/zx1/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,7 @@ +Source files in this directory are identical copies of linux-2.6.19 files: + +NOTE: DO NOT commit changes to these files! If a file +needs to be changed, move it to ../linux-xen and follow +the instructions in the README there. + +hpzx1_machvec.c -> linux/arch/ia64/hp/zx1/hpzx1_machvec.c diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/hp/zx1/hpzx1_machvec.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,3 @@ +#define MACHVEC_PLATFORM_NAME hpzx1 +#define MACHVEC_PLATFORM_HEADER <asm/machvec_hpzx1.h> +#include <asm/machvec_init.h> diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/io.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/io.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,164 @@ +#include <linux/module.h> +#include <linux/types.h> + +#include <asm/io.h> + +/* + * Copy data from IO memory space to "real" memory space. + * This needs to be optimized. + */ +void memcpy_fromio(void *to, const volatile void __iomem *from, long count) +{ + char *dst = to; + + while (count) { + count--; + *dst++ = readb(from++); + } +} +EXPORT_SYMBOL(memcpy_fromio); + +/* + * Copy data from "real" memory space to IO memory space. + * This needs to be optimized. + */ +void memcpy_toio(volatile void __iomem *to, const void *from, long count) +{ + const char *src = from; + + while (count) { + count--; + writeb(*src++, to++); + } +} +EXPORT_SYMBOL(memcpy_toio); + +/* + * "memset" on IO memory space. 
+ * This needs to be optimized. + */ +void memset_io(volatile void __iomem *dst, int c, long count) +{ + unsigned char ch = (char)(c & 0xff); + + while (count) { + count--; + writeb(ch, dst); + dst++; + } +} +EXPORT_SYMBOL(memset_io); + +#ifdef CONFIG_IA64_GENERIC + +#undef __ia64_inb +#undef __ia64_inw +#undef __ia64_inl +#undef __ia64_outb +#undef __ia64_outw +#undef __ia64_outl +#undef __ia64_readb +#undef __ia64_readw +#undef __ia64_readl +#undef __ia64_readq +#undef __ia64_readb_relaxed +#undef __ia64_readw_relaxed +#undef __ia64_readl_relaxed +#undef __ia64_readq_relaxed +#undef __ia64_writeb +#undef __ia64_writew +#undef __ia64_writel +#undef __ia64_writeq +#undef __ia64_mmiowb + +unsigned int +__ia64_inb (unsigned long port) +{ + return ___ia64_inb(port); +} + +unsigned int +__ia64_inw (unsigned long port) +{ + return ___ia64_inw(port); +} + +unsigned int +__ia64_inl (unsigned long port) +{ + return ___ia64_inl(port); +} + +void +__ia64_outb (unsigned char val, unsigned long port) +{ + ___ia64_outb(val, port); +} + +void +__ia64_outw (unsigned short val, unsigned long port) +{ + ___ia64_outw(val, port); +} + +void +__ia64_outl (unsigned int val, unsigned long port) +{ + ___ia64_outl(val, port); +} + +unsigned char +__ia64_readb (void __iomem *addr) +{ + return ___ia64_readb (addr); +} + +unsigned short +__ia64_readw (void __iomem *addr) +{ + return ___ia64_readw (addr); +} + +unsigned int +__ia64_readl (void __iomem *addr) +{ + return ___ia64_readl (addr); +} + +unsigned long +__ia64_readq (void __iomem *addr) +{ + return ___ia64_readq (addr); +} + +unsigned char +__ia64_readb_relaxed (void __iomem *addr) +{ + return ___ia64_readb (addr); +} + +unsigned short +__ia64_readw_relaxed (void __iomem *addr) +{ + return ___ia64_readw (addr); +} + +unsigned int +__ia64_readl_relaxed (void __iomem *addr) +{ + return ___ia64_readl (addr); +} + +unsigned long +__ia64_readq_relaxed (void __iomem *addr) +{ + return ___ia64_readq (addr); +} + +void +__ia64_mmiowb(void) +{ + ___ia64_mmiowb(); +} + +#endif /* CONFIG_IA64_GENERIC */ diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,2 @@ +subdir-y += kernel +subdir-y += pci diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/kernel/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/kernel/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,3 @@ +obj-y += machvec.o +obj-y += pio_phys.o +obj-y += ptc_deadlock.o diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/kernel/README.origin --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/kernel/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,9 @@ +Source files in this directory are identical copies of linux-2.6.19 files: + +NOTE: DO NOT commit changes to these files! If a file +needs to be changed, move it to ../linux-xen and follow +the instructions in the README there. + +machvec.c -> linux/arch/ia64/sn/kernel/machvec.c +pio_phys.S -> linux/arch/ia64/sn/kernel/pio_phys.S +ptc_deadlock.S -> linux/arch/ia64/sn/kernel/sn2/ptc_deadlock.S diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/kernel/machvec.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/kernel/machvec.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,11 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved. + */ + +#define MACHVEC_PLATFORM_NAME sn2 +#define MACHVEC_PLATFORM_HEADER <asm/machvec_sn2.h> +#include <asm/machvec_init.h> diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/kernel/pio_phys.S --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/kernel/pio_phys.S Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,71 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. + * + * This file contains macros used to access MMR registers via + * uncached physical addresses. + * pio_phys_read_mmr - read an MMR + * pio_phys_write_mmr - write an MMR + * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0 + * Second MMR will be skipped if address is NULL + * + * Addresses passed to these routines should be uncached physical addresses + * ie., 0x80000.... + */ + + + +#include <asm/asmmacro.h> +#include <asm/page.h> + +GLOBAL_ENTRY(pio_phys_read_mmr) + .prologue + .regstk 1,0,0,0 + .body + mov r2=psr + rsm psr.i | psr.dt + ;; + srlz.d + ld8.acq r8=[r32] + ;; + mov psr.l=r2;; + srlz.d + br.ret.sptk.many rp +END(pio_phys_read_mmr) + +GLOBAL_ENTRY(pio_phys_write_mmr) + .prologue + .regstk 2,0,0,0 + .body + mov r2=psr + rsm psr.i | psr.dt + ;; + srlz.d + st8.rel [r32]=r33 + ;; + mov psr.l=r2;; + srlz.d + br.ret.sptk.many rp +END(pio_phys_write_mmr) + +GLOBAL_ENTRY(pio_atomic_phys_write_mmrs) + .prologue + .regstk 4,0,0,0 + .body + mov r2=psr + cmp.ne p9,p0=r34,r0; + rsm psr.i | psr.dt | psr.ic + ;; + srlz.d + st8.rel [r32]=r33 +(p9) st8.rel [r34]=r35 + ;; + mov psr.l=r2;; + srlz.d + br.ret.sptk.many rp +END(pio_atomic_phys_write_mmrs) + + diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/kernel/ptc_deadlock.S Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,92 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved. + */ + +#include <asm/types.h> +#include <asm/sn/shub_mmr.h> + +#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT +#define WRITECOUNTMASK SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK +#define ALIAS_OFFSET 8 + + + .global sn2_ptc_deadlock_recovery_core + .proc sn2_ptc_deadlock_recovery_core + +sn2_ptc_deadlock_recovery_core: + .regstk 6,0,0,0 + + ptc0 = in0 + data0 = in1 + ptc1 = in2 + data1 = in3 + piowc = in4 + zeroval = in5 + piowcphy = r30 + psrsave = r2 + scr1 = r16 + scr2 = r17 + mask = r18 + + + extr.u piowcphy=piowc,0,61;; // Convert piowc to uncached physical address + dep piowcphy=-1,piowcphy,63,1 + movl mask=WRITECOUNTMASK + mov r8=r0 + +1: + cmp.ne p8,p9=r0,ptc1 // Test for shub type (ptc1 non-null on shub1) + // p8 = 1 if shub1, p9 = 1 if shub2 + + add scr2=ALIAS_OFFSET,piowc // Address of WRITE_STATUS alias register + mov scr1=7;; // Clear DEADLOCK, WRITE_ERROR, MULTI_WRITE_ERROR +(p8) st8.rel [scr2]=scr1;; +(p9) ld8.acq scr1=[scr2];; + +5: ld8.acq scr1=[piowc];; // Wait for PIOs to complete. 
+ hint @pause + and scr2=scr1,mask;; // mask of writecount bits + cmp.ne p6,p0=zeroval,scr2 +(p6) br.cond.sptk 5b + + + + ////////////// BEGIN PHYSICAL MODE //////////////////// + mov psrsave=psr // Disable IC (no PMIs) + rsm psr.i | psr.dt | psr.ic;; + srlz.i;; + + st8.rel [ptc0]=data0 // Write PTC0 & wait for completion. + +5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. + hint @pause + and scr2=scr1,mask;; // mask of writecount bits + cmp.ne p6,p0=zeroval,scr2 +(p6) br.cond.sptk 5b;; + + tbit.nz p8,p7=scr1,DEADLOCKBIT;;// Test for DEADLOCK +(p7) cmp.ne p7,p0=r0,ptc1;; // Test for non-null ptc1 + +(p7) st8.rel [ptc1]=data1;; // Now write PTC1. + +5: ld8.acq scr1=[piowcphy];; // Wait for PIOs to complete. + hint @pause + and scr2=scr1,mask;; // mask of writecount bits + cmp.ne p6,p0=zeroval,scr2 +(p6) br.cond.sptk 5b + + tbit.nz p8,p0=scr1,DEADLOCKBIT;;// Test for DEADLOCK + + mov psr.l=psrsave;; // Reenable IC + srlz.i;; + ////////////// END PHYSICAL MODE //////////////////// + +(p8) add r8=1,r8 +(p8) br.cond.spnt 1b;; // Repeat if DEADLOCK occurred. + + br.ret.sptk rp + .endp sn2_ptc_deadlock_recovery_core diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/pci/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/pci/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,1 @@ +subdir-y += pcibr diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/pci/pcibr/Makefile --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/pci/pcibr/Makefile Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,1 @@ +obj-y += pcibr_reg.o diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/pci/pcibr/README.origin --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/pci/pcibr/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,7 @@ +Source files in this directory are identical copies of linux-2.6.19 files: + +NOTE: DO NOT commit changes to these files! If a file +needs to be changed, move it to ../linux-xen and follow +the instructions in the README there. + +pcibr_reg.c -> linux/arch/ia64/sn/pci/pcibr/pcibr_reg.c diff -r c0d41ac21486 -r de3d55c10f16 xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/arch/ia64/linux/sn/pci/pcibr/pcibr_reg.c Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,285 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#include <linux/interrupt.h> +#include <linux/types.h> +#include <asm/sn/io.h> +#include <asm/sn/pcibr_provider.h> +#include <asm/sn/pcibus_provider_defs.h> +#include <asm/sn/pcidev.h> +#include <asm/sn/pic.h> +#include <asm/sn/tiocp.h> + +union br_ptr { + struct tiocp tio; + struct pic pic; +}; + +/* + * Control Register Access -- Read/Write 0000_0020 + */ +void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, u64 bits) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + __sn_clrq_relaxed(&ptr->tio.cp_control, bits); + break; + case PCIBR_BRIDGETYPE_PIC: + __sn_clrq_relaxed(&ptr->pic.p_wid_control, bits); + break; + default: + panic + ("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p", + ptr); + } + } +} + +void pcireg_control_bit_set(struct pcibus_info *pcibus_info, u64 bits) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + __sn_setq_relaxed(&ptr->tio.cp_control, bits); + break; + case PCIBR_BRIDGETYPE_PIC: + __sn_setq_relaxed(&ptr->pic.p_wid_control, bits); + break; + default: + panic + ("pcireg_control_bit_set: unknown bridgetype bridge 0x%p", + ptr); + } + } +} + +/* + * PCI/PCIX Target Flush Register Access -- Read Only 0000_0050 + */ +u64 pcireg_tflush_get(struct pcibus_info *pcibus_info) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + u64 ret = 0; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + ret = __sn_readq_relaxed(&ptr->tio.cp_tflush); + break; + case PCIBR_BRIDGETYPE_PIC: + ret = __sn_readq_relaxed(&ptr->pic.p_wid_tflush); + break; + default: + panic + ("pcireg_tflush_get: unknown bridgetype bridge 0x%p", + ptr); + } + } + + /* Read of the Target Flush should always return zero */ + if (ret != 0) + panic("pcireg_tflush_get:Target Flush failed\n"); + + return ret; +} + +/* + * Interrupt Status Register Access -- Read Only 0000_0100 + */ +u64 pcireg_intr_status_get(struct pcibus_info * pcibus_info) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + u64 ret = 0; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + ret = __sn_readq_relaxed(&ptr->tio.cp_int_status); + break; + case PCIBR_BRIDGETYPE_PIC: + ret = __sn_readq_relaxed(&ptr->pic.p_int_status); + break; + default: + panic + ("pcireg_intr_status_get: unknown bridgetype bridge 0x%p", + ptr); + } + } + return ret; +} + +/* + * Interrupt Enable Register Access -- Read/Write 0000_0108 + */ +void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, u64 bits) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + __sn_clrq_relaxed(&ptr->tio.cp_int_enable, bits); + break; + case PCIBR_BRIDGETYPE_PIC: + __sn_clrq_relaxed(&ptr->pic.p_int_enable, bits); + break; + default: + panic + ("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p", + ptr); + } + } +} + +void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, u64 bits) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case 
PCIBR_BRIDGETYPE_TIOCP: + __sn_setq_relaxed(&ptr->tio.cp_int_enable, bits); + break; + case PCIBR_BRIDGETYPE_PIC: + __sn_setq_relaxed(&ptr->pic.p_int_enable, bits); + break; + default: + panic + ("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p", + ptr); + } + } +} + +/* + * Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168 + */ +void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n, + u64 addr) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + __sn_clrq_relaxed(&ptr->tio.cp_int_addr[int_n], + TIOCP_HOST_INTR_ADDR); + __sn_setq_relaxed(&ptr->tio.cp_int_addr[int_n], + (addr & TIOCP_HOST_INTR_ADDR)); + break; + case PCIBR_BRIDGETYPE_PIC: + __sn_clrq_relaxed(&ptr->pic.p_int_addr[int_n], + PIC_HOST_INTR_ADDR); + __sn_setq_relaxed(&ptr->pic.p_int_addr[int_n], + (addr & PIC_HOST_INTR_ADDR)); + break; + default: + panic + ("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p", + ptr); + } + } +} + +/* + * Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8 + */ +void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + writeq(1, &ptr->tio.cp_force_pin[int_n]); + break; + case PCIBR_BRIDGETYPE_PIC: + writeq(1, &ptr->pic.p_force_pin[int_n]); + break; + default: + panic + ("pcireg_force_intr_set: unknown bridgetype bridge 0x%p", + ptr); + } + } +} + +/* + * Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258 + */ +u64 pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + u64 ret = 0; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + ret = + __sn_readq_relaxed(&ptr->tio.cp_wr_req_buf[device]); + break; + case PCIBR_BRIDGETYPE_PIC: + ret = + __sn_readq_relaxed(&ptr->pic.p_wr_req_buf[device]); + break; + default: + panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", ptr); + } + + } + /* Read of the Write Buffer Flush should always return zero */ + return ret; +} + +void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index, + u64 val) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + writeq(val, &ptr->tio.cp_int_ate_ram[ate_index]); + break; + case PCIBR_BRIDGETYPE_PIC: + writeq(val, &ptr->pic.p_int_ate_ram[ate_index]); + break; + default: + panic + ("pcireg_int_ate_set: unknown bridgetype bridge 0x%p", + ptr); + } + } +} + +u64 __iomem *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index) +{ + union br_ptr __iomem *ptr = (union br_ptr __iomem *)pcibus_info->pbi_buscommon.bs_base; + u64 __iomem *ret = NULL; + + if (pcibus_info) { + switch (pcibus_info->pbi_bridge_type) { + case PCIBR_BRIDGETYPE_TIOCP: + ret = &ptr->tio.cp_int_ate_ram[ate_index]; + break; + case PCIBR_BRIDGETYPE_PIC: + ret = &ptr->pic.p_int_ate_ram[ate_index]; + break; + default: + panic + ("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p", + ptr); + } + } + return ret; +} diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-null/linux/ioport.h --- 
a/xen/include/asm-ia64/linux-null/linux/ioport.h Tue Dec 12 15:25:02 2006 -0700 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,1 +0,0 @@ -/* This file is intentionally left empty. */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/README.origin --- a/xen/include/asm-ia64/linux-xen/asm/README.origin Tue Dec 12 15:25:02 2006 -0700 +++ b/xen/include/asm-ia64/linux-xen/asm/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -34,3 +34,9 @@ iosapic.h -> linux/include/asm-ia64/ios # The files below are from Linux-2.6.16.33 perfmon.h -> linux/include/asm-ia64/perfmon.h perfmon_default_smpl.h -> linux/include/asm-ia64/perfmon_default_smpl.h + +# The files below are from Linux-2.6.19 +machvec_dig.h -> linux/include/asm-ia64/machvec_dig.h +machvec_sn2.h -> linux/include/asm-ia64/machvec_sn2.h +machvec_hpzx1.h -> linux/include/asm-ia64/machvec_hpzx1.h +machvec_pci.h -> linux/include/asm-ia64/pci.h diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/machvec_dig.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/machvec_dig.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,16 @@ +#ifndef _ASM_IA64_MACHVEC_DIG_h +#define _ASM_IA64_MACHVEC_DIG_h + +extern ia64_mv_setup_t dig_setup; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. + */ +#define platform_name "dig" +#define platform_setup dig_setup + +#endif /* _ASM_IA64_MACHVEC_DIG_h */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/machvec_hpzx1.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,37 @@ +#ifndef _ASM_IA64_MACHVEC_HPZX1_h +#define _ASM_IA64_MACHVEC_HPZX1_h + +extern ia64_mv_setup_t dig_setup; +extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; +extern ia64_mv_dma_free_coherent sba_free_coherent; +extern ia64_mv_dma_map_single sba_map_single; +extern ia64_mv_dma_unmap_single sba_unmap_single; +extern ia64_mv_dma_map_sg sba_map_sg; +extern ia64_mv_dma_unmap_sg sba_unmap_sg; +extern ia64_mv_dma_supported sba_dma_supported; +extern ia64_mv_dma_mapping_error sba_dma_mapping_error; + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. 
+ */ +#define platform_name "hpzx1" +#define platform_setup dig_setup +#define platform_dma_init machvec_noop +#define platform_dma_alloc_coherent sba_alloc_coherent +#define platform_dma_free_coherent sba_free_coherent +#define platform_dma_map_single sba_map_single +#define platform_dma_unmap_single sba_unmap_single +#define platform_dma_map_sg sba_map_sg +#define platform_dma_unmap_sg sba_unmap_sg +#define platform_dma_sync_single_for_cpu machvec_dma_sync_single +#define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg +#define platform_dma_sync_single_for_device machvec_dma_sync_single +#define platform_dma_sync_sg_for_device machvec_dma_sync_sg +#define platform_dma_supported sba_dma_supported +#define platform_dma_mapping_error sba_dma_mapping_error + +#endif /* _ASM_IA64_MACHVEC_HPZX1_h */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/machvec_sn2.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2002-2003,2006 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/NoticeExplan + */ + +#ifndef _ASM_IA64_MACHVEC_SN2_H +#define _ASM_IA64_MACHVEC_SN2_H + +extern ia64_mv_setup_t sn_setup; +extern ia64_mv_cpu_init_t sn_cpu_init; +extern ia64_mv_irq_init_t sn_irq_init; +extern ia64_mv_send_ipi_t sn2_send_IPI; +extern ia64_mv_timer_interrupt_t sn_timer_interrupt; +extern ia64_mv_global_tlb_purge_t sn2_global_tlb_purge; +extern ia64_mv_tlb_migrate_finish_t sn_tlb_migrate_finish; +extern ia64_mv_local_vector_to_irq sn_local_vector_to_irq; +extern ia64_mv_pci_get_legacy_mem_t sn_pci_get_legacy_mem; +extern ia64_mv_pci_legacy_read_t sn_pci_legacy_read; +extern ia64_mv_pci_legacy_write_t sn_pci_legacy_write; +extern ia64_mv_inb_t __sn_inb; +extern ia64_mv_inw_t __sn_inw; +extern ia64_mv_inl_t __sn_inl; +extern ia64_mv_outb_t __sn_outb; +extern ia64_mv_outw_t __sn_outw; +extern ia64_mv_outl_t __sn_outl; +extern ia64_mv_mmiowb_t __sn_mmiowb; +extern ia64_mv_readb_t __sn_readb; +extern ia64_mv_readw_t __sn_readw; +extern ia64_mv_readl_t __sn_readl; +extern ia64_mv_readq_t __sn_readq; +extern ia64_mv_readb_t __sn_readb_relaxed; +extern ia64_mv_readw_t __sn_readw_relaxed; +extern ia64_mv_readl_t __sn_readl_relaxed; +extern ia64_mv_readq_t __sn_readq_relaxed; +extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; +extern ia64_mv_dma_free_coherent sn_dma_free_coherent; +extern ia64_mv_dma_map_single sn_dma_map_single; +extern ia64_mv_dma_unmap_single sn_dma_unmap_single; +extern ia64_mv_dma_map_sg sn_dma_map_sg; +extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg; +extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; +extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; +extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; +extern ia64_mv_dma_sync_sg_for_device sn_dma_sync_sg_for_device; +extern ia64_mv_dma_mapping_error sn_dma_mapping_error; +extern ia64_mv_dma_supported sn_dma_supported; +extern ia64_mv_migrate_t sn_migrate; +extern ia64_mv_setup_msi_irq_t sn_setup_msi_irq; +extern ia64_mv_teardown_msi_irq_t sn_teardown_msi_irq; + + +/* + * This stuff has dual use! + * + * For a generic kernel, the macros are used to initialize the + * platform's machvec structure. When compiling a non-generic kernel, + * the macros are used directly. 
+ */ +#define platform_name "sn2" +#define platform_setup sn_setup +#define platform_cpu_init sn_cpu_init +#define platform_irq_init sn_irq_init +#define platform_send_ipi sn2_send_IPI +#define platform_timer_interrupt sn_timer_interrupt +#define platform_global_tlb_purge sn2_global_tlb_purge +#define platform_tlb_migrate_finish sn_tlb_migrate_finish +#define platform_pci_fixup sn_pci_fixup +#define platform_inb __sn_inb +#define platform_inw __sn_inw +#define platform_inl __sn_inl +#define platform_outb __sn_outb +#define platform_outw __sn_outw +#define platform_outl __sn_outl +#define platform_mmiowb __sn_mmiowb +#define platform_readb __sn_readb +#define platform_readw __sn_readw +#define platform_readl __sn_readl +#define platform_readq __sn_readq +#define platform_readb_relaxed __sn_readb_relaxed +#define platform_readw_relaxed __sn_readw_relaxed +#define platform_readl_relaxed __sn_readl_relaxed +#define platform_readq_relaxed __sn_readq_relaxed +#define platform_local_vector_to_irq sn_local_vector_to_irq +#define platform_pci_get_legacy_mem sn_pci_get_legacy_mem +#define platform_pci_legacy_read sn_pci_legacy_read +#define platform_pci_legacy_write sn_pci_legacy_write +#define platform_dma_init machvec_noop +#define platform_dma_alloc_coherent sn_dma_alloc_coherent +#define platform_dma_free_coherent sn_dma_free_coherent +#define platform_dma_map_single sn_dma_map_single +#define platform_dma_unmap_single sn_dma_unmap_single +#define platform_dma_map_sg sn_dma_map_sg +#define platform_dma_unmap_sg sn_dma_unmap_sg +#define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu +#define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu +#define platform_dma_sync_single_for_device sn_dma_sync_single_for_device +#define platform_dma_sync_sg_for_device sn_dma_sync_sg_for_device +#define platform_dma_mapping_error sn_dma_mapping_error +#define platform_dma_supported sn_dma_supported +#define platform_migrate sn_migrate +#ifdef CONFIG_PCI_MSI +#define platform_setup_msi_irq sn_setup_msi_irq +#define platform_teardown_msi_irq sn_teardown_msi_irq +#else +#define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL) +#define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL) +#endif + +#include <asm/sn/io.h> + +#endif /* _ASM_IA64_MACHVEC_SN2_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/pci.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/pci.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,174 @@ +#ifndef _ASM_IA64_PCI_H +#define _ASM_IA64_PCI_H + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <asm/io.h> +#include <asm/scatterlist.h> + +/* + * Can be used to override the logic in pci_scan_bus for skipping already-configured bus + * numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the + * loader. + */ +#define pcibios_assign_all_busses() 0 +#define pcibios_scan_all_fns(a, b) 0 + +#define PCIBIOS_MIN_IO 0x1000 +#define PCIBIOS_MIN_MEM 0x10000000 + +void pcibios_config_init(void); + +struct pci_dev; + +/* + * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct correspondence + * between device bus addresses and CPU physical addresses. Platforms with a hardware I/O + * MMU _must_ turn this off to suppress the bounce buffer handling code in the block and + * network device layers. 
Platforms with separate bus address spaces _must_ turn this off + * and provide a device DMA mapping implementation that takes care of the necessary + * address translation. + * + * For now, the ia64 platforms which may have separate/multiple bus address spaces all + * have I/O MMUs which support the merging of physically discontiguous buffers, so we can + * use that as the sole factor to determine the setting of PCI_DMA_BUS_IS_PHYS. + */ +extern unsigned long ia64_max_iommu_merge_mask; +#define PCI_DMA_BUS_IS_PHYS (ia64_max_iommu_merge_mask == ~0UL) + +static inline void +pcibios_set_master (struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + +static inline void +pcibios_penalize_isa_irq (int irq, int active) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +#define HAVE_ARCH_PCI_MWI 1 +extern int pcibios_prep_mwi (struct pci_dev *); + +#include <asm-generic/pci-dma-compat.h> + +/* pci_unmap_{single,page} is not a nop, thus... */ +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) + +/* The ia64 platform always supports 64-bit addressing. */ +#define pci_dac_dma_supported(pci_dev, mask) (1) +#define pci_dac_page_to_dma(dev,pg,off,dir) ((dma_addr_t) page_to_bus(pg) + (off)) +#define pci_dac_dma_to_page(dev,dma_addr) (virt_to_page(bus_to_virt(dma_addr))) +#define pci_dac_dma_to_offset(dev,dma_addr) offset_in_page(dma_addr) +#define pci_dac_dma_sync_single_for_cpu(dev,dma_addr,len,dir) do { } while (0) +#define pci_dac_dma_sync_single_for_device(dev,dma_addr,len,dir) do { mb(); } while (0) + +#define sg_dma_len(sg) ((sg)->dma_length) +#define sg_dma_address(sg) ((sg)->dma_address) + +#ifdef CONFIG_PCI +static inline void pci_dma_burst_advice(struct pci_dev *pdev, + enum pci_dma_burst_strategy *strat, + unsigned long *strategy_parameter) +{ + unsigned long cacheline_size; + u8 byte; + + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte); + if (byte == 0) + cacheline_size = 1024; + else + cacheline_size = (int) byte * 4; + + *strat = PCI_DMA_BURST_MULTIPLE; + *strategy_parameter = cacheline_size; +} +#endif + +#define HAVE_PCI_MMAP +extern int pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma, + enum pci_mmap_state mmap_state, int write_combine); +#define HAVE_PCI_LEGACY +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma); +extern ssize_t pci_read_legacy_io(struct kobject *kobj, char *buf, loff_t off, + size_t count); +extern ssize_t pci_write_legacy_io(struct kobject *kobj, char *buf, loff_t off, + size_t count); +extern int pci_mmap_legacy_mem(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma); + +#define pci_get_legacy_mem platform_pci_get_legacy_mem +#define pci_legacy_read platform_pci_legacy_read +#define pci_legacy_write platform_pci_legacy_write + +struct pci_window { + struct resource resource; + u64 offset; +}; + +struct pci_controller { + void *acpi_handle; + void *iommu; + int segment; + int node; /* nearest node with memory or -1 for global allocation */ + + unsigned int windows; + struct pci_window *window; + + void *platform_data; +}; + +#define PCI_CONTROLLER(busdev) ((struct pci_controller *) 
busdev->sysdata) +#define pci_domain_nr(busdev) (PCI_CONTROLLER(busdev)->segment) + +extern struct pci_ops pci_root_ops; + +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return (pci_domain_nr(bus) != 0); +} + +static inline void pcibios_add_platform_entries(struct pci_dev *dev) +{ +} + +extern void pcibios_resource_to_bus(struct pci_dev *dev, + struct pci_bus_region *region, struct resource *res); + +extern void pcibios_bus_to_resource(struct pci_dev *dev, + struct resource *res, struct pci_bus_region *region); + +static inline struct resource * +pcibios_select_root(struct pci_dev *pdev, struct resource *res) +{ + struct resource *root = NULL; + + if (res->flags & IORESOURCE_IO) + root = &ioport_resource; + if (res->flags & IORESOURCE_MEM) + root = &iomem_resource; + + return root; +} + +#define pcibios_scan_all_fns(a, b) 0 + +#endif /* _ASM_IA64_PCI_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/README.origin --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,16 @@ +# Source files in this directory are near-identical copies of linux-2.6.19 +# files: + +# NOTE: ALL changes to these files should be clearly marked +# (e.g. with #ifdef XEN or XEN in a comment) so that they can be +# easily updated to future versions of the corresponding Linux files. + +addrs.h -> linux/include/asm-ia64/sn/addrs.h +arch.h -> linux/include/asm-ia64/sn/arch.h +hubdev.h -> linux/arch/ia64/sn/include/xtalk/hubdev.h +intr.h -> linux/include/asm-ia64/sn/intr.h +io.h -> linux/include/asm-ia64/sn/io.h +nodepda.h -> linux/include/asm-ia64/sn/nodepda.h +pcibr_provider.h -> linux/include/asm-ia64/sn/pcibr_provider.h +rw_mmr.h -> linux/include/asm-ia64/sn/rw_mmr.h +types.h -> linux/include/asm-ia64/sn/types.h diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/addrs.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/addrs.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,299 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 1992-1999,2001-2005 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_ADDRS_H +#define _ASM_IA64_SN_ADDRS_H + +#include <asm/percpu.h> +#include <asm/sn/types.h> +#include <asm/sn/arch.h> +#include <asm/sn/pda.h> + +/* + * Memory/SHUB Address Format: + * +-+---------+--+--------------+ + * |0| NASID |AS| NodeOffset | + * +-+---------+--+--------------+ + * + * NASID: (low NASID bit is 0) Memory and SHUB MMRs + * AS: 2-bit Address Space Identifier. Used only if low NASID bit is 0 + * 00: Local Resources and MMR space + * Top bit of NodeOffset + * 0: Local resources space + * node id: + * 0: IA64/NT compatibility space + * 2: Local MMR Space + * 4: Local memory, regardless of local node id + * 1: Global MMR space + * 01: GET space. + * 10: AMO space. + * 11: Cacheable memory space. + * + * NodeOffset: byte offset + * + * + * TIO address format: + * +-+----------+--+--------------+ + * |0| NASID |AS| Nodeoffset | + * +-+----------+--+--------------+ + * + * NASID: (low NASID bit is 1) TIO + * AS: 2-bit Chiplet Identifier + * 00: TIO LB (Indicates TIO MMR access.) + * 01: TIO ICE (indicates coretalk space access.) + * + * NodeOffset: top bit must be set. 
+ * + * + * Note that in both of the above address formats, the low + * NASID bit indicates if the reference is to the SHUB or TIO MMRs. + */ + + +/* + * Define basic shift & mask constants for manipulating NASIDs and AS values. + */ +#define NASID_BITMASK (sn_hub_info->nasid_bitmask) +#define NASID_SHIFT (sn_hub_info->nasid_shift) +#define AS_SHIFT (sn_hub_info->as_shift) +#define AS_BITMASK 0x3UL + +#define NASID_MASK ((u64)NASID_BITMASK << NASID_SHIFT) +#define AS_MASK ((u64)AS_BITMASK << AS_SHIFT) + + +/* + * AS values. These are the same on both SHUB1 & SHUB2. + */ +#define AS_GET_VAL 1UL +#define AS_AMO_VAL 2UL +#define AS_CAC_VAL 3UL +#define AS_GET_SPACE (AS_GET_VAL << AS_SHIFT) +#define AS_AMO_SPACE (AS_AMO_VAL << AS_SHIFT) +#define AS_CAC_SPACE (AS_CAC_VAL << AS_SHIFT) + + +/* + * Virtual Mode Local & Global MMR space. + */ +#define SH1_LOCAL_MMR_OFFSET 0x8000000000UL +#define SH2_LOCAL_MMR_OFFSET 0x0200000000UL +#define LOCAL_MMR_OFFSET (is_shub2() ? SH2_LOCAL_MMR_OFFSET : SH1_LOCAL_MMR_OFFSET) +#define LOCAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | LOCAL_MMR_OFFSET) +#define LOCAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | LOCAL_MMR_OFFSET) + +#define SH1_GLOBAL_MMR_OFFSET 0x0800000000UL +#define SH2_GLOBAL_MMR_OFFSET 0x0300000000UL +#define GLOBAL_MMR_OFFSET (is_shub2() ? SH2_GLOBAL_MMR_OFFSET : SH1_GLOBAL_MMR_OFFSET) +#define GLOBAL_MMR_SPACE (__IA64_UNCACHED_OFFSET | GLOBAL_MMR_OFFSET) + +/* + * Physical mode addresses + */ +#define GLOBAL_PHYS_MMR_SPACE (RGN_BASE(RGN_HPAGE) | GLOBAL_MMR_OFFSET) + + +/* + * Clear region & AS bits. + */ +#define TO_PHYS_MASK (~(RGN_BITS | AS_MASK)) + + +/* + * Misc NASID manipulation. + */ +#define NASID_SPACE(n) ((u64)(n) << NASID_SHIFT) +#define REMOTE_ADDR(n,a) (NASID_SPACE(n) | (a)) +#define NODE_OFFSET(x) ((x) & (NODE_ADDRSPACE_SIZE - 1)) +#define NODE_ADDRSPACE_SIZE (1UL << AS_SHIFT) +#define NASID_GET(x) (int) (((u64) (x) >> NASID_SHIFT) & NASID_BITMASK) +#define LOCAL_MMR_ADDR(a) (LOCAL_MMR_SPACE | (a)) +#define GLOBAL_MMR_ADDR(n,a) (GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a)) +#define GLOBAL_MMR_PHYS_ADDR(n,a) (GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a)) +#define GLOBAL_CAC_ADDR(n,a) (CAC_BASE | REMOTE_ADDR(n,a)) +#define CHANGE_NASID(n,x) ((void *)(((u64)(x) & ~NASID_MASK) | NASID_SPACE(n))) +#define IS_TIO_NASID(n) ((n) & 1) + + +/* non-II mmr's start at top of big window space (4G) */ +#define BWIN_TOP 0x0000000100000000UL + +/* + * general address defines + */ +#define CAC_BASE (PAGE_OFFSET | AS_CAC_SPACE) +#define AMO_BASE (__IA64_UNCACHED_OFFSET | AS_AMO_SPACE) +#define AMO_PHYS_BASE (RGN_BASE(RGN_HPAGE) | AS_AMO_SPACE) +#define GET_BASE (PAGE_OFFSET | AS_GET_SPACE) + +/* + * Convert Memory addresses between various addressing modes. + */ +#define TO_PHYS(x) (TO_PHYS_MASK & (x)) +#define TO_CAC(x) (CAC_BASE | TO_PHYS(x)) +#ifdef CONFIG_SGI_SN +#define TO_AMO(x) (AMO_BASE | TO_PHYS(x)) +#define TO_GET(x) (GET_BASE | TO_PHYS(x)) +#else +#define TO_AMO(x) ({ BUG(); x; }) +#define TO_GET(x) ({ BUG(); x; }) +#endif + +/* + * Covert from processor physical address to II/TIO physical address: + * II - squeeze out the AS bits + * TIO- requires a chiplet id in bits 38-39. For DMA to memory, + * the chiplet id is zero. If we implement TIO-TIO dma, we might need + * to insert a chiplet id into this macro. However, it is our belief + * right now that this chiplet id will be ICE, which is also zero. 
+ */ +#define SH1_TIO_PHYS_TO_DMA(x) \ + ((((u64)(NASID_GET(x))) << 40) | NODE_OFFSET(x)) + +#define SH2_NETWORK_BANK_OFFSET(x) \ + ((u64)(x) & ((1UL << (sn_hub_info->nasid_shift - 4)) -1)) + +#define SH2_NETWORK_BANK_SELECT(x) \ + ((((u64)(x) & (0x3UL << (sn_hub_info->nasid_shift - 4))) \ + >> (sn_hub_info->nasid_shift - 4)) << 36) + +#define SH2_NETWORK_ADDRESS(x) \ + (SH2_NETWORK_BANK_OFFSET(x) | SH2_NETWORK_BANK_SELECT(x)) + +#define SH2_TIO_PHYS_TO_DMA(x) \ + (((u64)(NASID_GET(x)) << 40) | SH2_NETWORK_ADDRESS(x)) + +#define PHYS_TO_TIODMA(x) \ + (is_shub1() ? SH1_TIO_PHYS_TO_DMA(x) : SH2_TIO_PHYS_TO_DMA(x)) + +#define PHYS_TO_DMA(x) \ + ((((u64)(x) & NASID_MASK) >> 2) | NODE_OFFSET(x)) + + +/* + * Macros to test for address type. + */ +#define IS_AMO_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_BASE) +#define IS_AMO_PHYS_ADDRESS(x) (((u64)(x) & (RGN_BITS | AS_MASK)) == AMO_PHYS_BASE) + + +/* + * The following definitions pertain to the IO special address + * space. They define the location of the big and little windows + * of any given node. + */ +#define BWIN_SIZE_BITS 29 /* big window size: 512M */ +#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */ +#define NODE_SWIN_BASE(n, w) ((w == 0) ? NODE_BWIN_BASE((n), SWIN0_BIGWIN) \ + : RAW_NODE_SWIN_BASE(n, w)) +#define TIO_SWIN_BASE(n, w) (TIO_IO_BASE(n) + \ + ((u64) (w) << TIO_SWIN_SIZE_BITS)) +#define NODE_IO_BASE(n) (GLOBAL_MMR_SPACE | NASID_SPACE(n)) +#define TIO_IO_BASE(n) (__IA64_UNCACHED_OFFSET | NASID_SPACE(n)) +#define BWIN_SIZE (1UL << BWIN_SIZE_BITS) +#define NODE_BWIN_BASE0(n) (NODE_IO_BASE(n) + BWIN_SIZE) +#define NODE_BWIN_BASE(n, w) (NODE_BWIN_BASE0(n) + ((u64) (w) << BWIN_SIZE_BITS)) +#define RAW_NODE_SWIN_BASE(n, w) (NODE_IO_BASE(n) + ((u64) (w) << SWIN_SIZE_BITS)) +#define BWIN_WIDGET_MASK 0x7 +#define BWIN_WINDOWNUM(x) (((x) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK) +#define SH1_IS_BIG_WINDOW_ADDR(x) ((x) & BWIN_TOP) + +#define TIO_BWIN_WINDOW_SELECT_MASK 0x7 +#define TIO_BWIN_WINDOWNUM(x) (((x) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK) + +#define TIO_HWIN_SHIFT_BITS 33 +#define TIO_HWIN(x) (NODE_OFFSET(x) >> TIO_HWIN_SHIFT_BITS) + +/* + * The following definitions pertain to the IO special address + * space. They define the location of the big and little windows + * of any given node. + */ + +#define SWIN_SIZE_BITS 24 +#define SWIN_WIDGET_MASK 0xF + +#define TIO_SWIN_SIZE_BITS 28 +#define TIO_SWIN_SIZE (1UL << TIO_SWIN_SIZE_BITS) +#define TIO_SWIN_WIDGET_MASK 0x3 + +/* + * Convert smallwindow address to xtalk address. + * + * 'addr' can be physical or virtual address, but will be converted + * to Xtalk address in the range 0 -> SWINZ_SIZEMASK + */ +#define SWIN_WIDGETNUM(x) (((x) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK) +#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK) + + +/* + * The following macros produce the correct base virtual address for + * the hub registers. The REMOTE_HUB_* macro produce + * the address for the specified hub's registers. The intent is + * that the appropriate PI, MD, NI, or II register would be substituted + * for x. + * + * WARNING: + * When certain Hub chip workaround are defined, it's not sufficient + * to dereference the *_HUB_ADDR() macros. You should instead use + * HUB_L() and HUB_S() if you must deal with pointers to hub registers. + * Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S(). + * They're always safe. 
+ */ +/* Shub1 TIO & MMR addressing macros */ +#define SH1_TIO_IOSPACE_ADDR(n,x) \ + GLOBAL_MMR_ADDR(n,x) + +#define SH1_REMOTE_BWIN_MMR(n,x) \ + GLOBAL_MMR_ADDR(n,x) + +#define SH1_REMOTE_SWIN_MMR(n,x) \ + (NODE_SWIN_BASE(n,1) + 0x800000UL + (x)) + +#define SH1_REMOTE_MMR(n,x) \ + (SH1_IS_BIG_WINDOW_ADDR(x) ? SH1_REMOTE_BWIN_MMR(n,x) : \ + SH1_REMOTE_SWIN_MMR(n,x)) + +/* Shub1 TIO & MMR addressing macros */ +#define SH2_TIO_IOSPACE_ADDR(n,x) \ + ((__IA64_UNCACHED_OFFSET | REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2))) + +#define SH2_REMOTE_MMR(n,x) \ + GLOBAL_MMR_ADDR(n,x) + + +/* TIO & MMR addressing macros that work on both shub1 & shub2 */ +#define TIO_IOSPACE_ADDR(n,x) \ + ((u64 *)(is_shub1() ? SH1_TIO_IOSPACE_ADDR(n,x) : \ + SH2_TIO_IOSPACE_ADDR(n,x))) + +#define SH_REMOTE_MMR(n,x) \ + (is_shub1() ? SH1_REMOTE_MMR(n,x) : SH2_REMOTE_MMR(n,x)) + +#define REMOTE_HUB_ADDR(n,x) \ + (IS_TIO_NASID(n) ? ((volatile u64*)TIO_IOSPACE_ADDR(n,x)) : \ + ((volatile u64*)SH_REMOTE_MMR(n,x))) + + +#define HUB_L(x) (*((volatile typeof(*x) *)x)) +#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d)) + +#define REMOTE_HUB_L(n, a) HUB_L(REMOTE_HUB_ADDR((n), (a))) +#define REMOTE_HUB_S(n, a, d) HUB_S(REMOTE_HUB_ADDR((n), (a)), (d)) + +/* + * Coretalk address breakdown + */ +#define CTALK_NASID_SHFT 40 +#define CTALK_NASID_MASK (0x3FFFULL << CTALK_NASID_SHFT) +#define CTALK_CID_SHFT 38 +#define CTALK_CID_MASK (0x3ULL << CTALK_CID_SHFT) +#define CTALK_NODE_OFFSET 0x3FFFFFFFFF + +#endif /* _ASM_IA64_SN_ADDRS_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/arch.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/arch.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,85 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * SGI specific setup. + * + * Copyright (C) 1995-1997,1999,2001-2005 Silicon Graphics, Inc. All rights reserved. + * Copyright (C) 1999 Ralf Baechle (ralf@xxxxxxx) + */ +#ifndef _ASM_IA64_SN_ARCH_H +#define _ASM_IA64_SN_ARCH_H + +#include <linux/numa.h> +#include <asm/types.h> +#include <asm/percpu.h> +#include <asm/sn/types.h> +#include <asm/sn/sn_cpuid.h> + +/* + * This is the maximum number of NUMALINK nodes that can be part of a single + * SSI kernel. This number includes C-brick, M-bricks, and TIOs. Nodes in + * remote partitions are NOT included in this number. + * The number of compact nodes cannot exceed size of a coherency domain. + * The purpose of this define is to specify a node count that includes + * all C/M/TIO nodes in an SSI system. + * + * SGI system can currently support up to 256 C/M nodes plus additional TIO nodes. + * + * Note: ACPI20 has an architectural limit of 256 nodes. When we upgrade + * to ACPI3.0, this limit will be removed. The notion of "compact nodes" + * should be deleted and TIOs should be included in MAX_NUMNODES. + */ +#define MAX_TIO_NODES MAX_NUMNODES +#define MAX_COMPACT_NODES (MAX_NUMNODES + MAX_TIO_NODES) + +/* + * Maximum number of nodes in all partitions and in all coherency domains. + * This is the total number of nodes accessible in the numalink fabric. It + * includes all C & M bricks, plus all TIOs. + * + * This value is also the value of the maximum number of NASIDs in the numalink + * fabric. + */ +#define MAX_NUMALINK_NODES 16384 + +/* + * The following defines attributes of the HUB chip. These attributes are + * frequently referenced. 
They are kept in the per-cpu data areas of each cpu. + * They are kept together in a struct to minimize cache misses. + */ +struct sn_hub_info_s { + u8 shub2; + u8 nasid_shift; + u8 as_shift; + u8 shub_1_1_found; + u16 nasid_bitmask; +}; +DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info); +#define sn_hub_info (&__get_cpu_var(__sn_hub_info)) +#define is_shub2() (sn_hub_info->shub2) +#define is_shub1() (sn_hub_info->shub2 == 0) + +/* + * Use this macro to test if shub 1.1 wars should be enabled + */ +#define enable_shub_wars_1_1() (sn_hub_info->shub_1_1_found) + + +/* + * Compact node ID to nasid mappings kept in the per-cpu data areas of each + * cpu. + */ +DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]); +#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0])) + + +extern u8 sn_partition_id; +extern u8 sn_system_size; +extern u8 sn_sharing_domain_size; +extern u8 sn_region_size; + +extern void sn_flush_all_caches(long addr, long bytes); + +#endif /* _ASM_IA64_SN_ARCH_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/hubdev.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,91 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H +#define _ASM_IA64_SN_XTALK_HUBDEV_H + +#include "xtalk/xwidgetdev.h" + +#define HUB_WIDGET_ID_MAX 0xf +#define DEV_PER_WIDGET (2*2*8) +#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */ +#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1) +#define IIO_ITTE_WIDGET_SHIFT 8 + +#define IIO_ITTE_WIDGET(itte) \ + (((itte) >> IIO_ITTE_WIDGET_SHIFT) & IIO_ITTE_WIDGET_MASK) + +/* + * Use the top big window as a surrogate for the first small window + */ +#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW +#define IIO_NUM_ITTES 7 +#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) + +/* This struct is shared between the PROM and the kernel. + * Changes to this struct will require corresponding changes to the kernel. + */ +struct sn_flush_device_common { + int sfdl_bus; + int sfdl_slot; + int sfdl_pin; + struct common_bar_list { + unsigned long start; + unsigned long end; + } sfdl_bar_list[6]; + unsigned long sfdl_force_int_addr; + unsigned long sfdl_flush_value; + volatile unsigned long *sfdl_flush_addr; + u32 sfdl_persistent_busnum; + u32 sfdl_persistent_segment; + struct pcibus_info *sfdl_pcibus_info; +}; + +/* This struct is kernel only and is not used by the PROM */ +struct sn_flush_device_kernel { + spinlock_t sfdl_flush_lock; + struct sn_flush_device_common *common; +}; + +/* 01/16/06 This struct is the old PROM/kernel struct and needs to be included + * for older official PROMs to function on the new kernel base. This struct + * will be removed when the next official PROM release occurs. */ + +struct sn_flush_device_war { + struct sn_flush_device_common common; + u32 filler; /* older PROMs expect the default size of a spinlock_t */ +}; + +/* + * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel. 
+ */ +struct sn_flush_nasid_entry { + struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num + u64 iio_itte[8]; +}; + +struct hubdev_info { + geoid_t hdi_geoid; + short hdi_nasid; + short hdi_peer_nasid; /* Dual Porting Peer */ + + struct sn_flush_nasid_entry hdi_flush_nasid_list; + struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1]; + + + void *hdi_nodepda; + void *hdi_node_vertex; + u32 max_segment_number; + u32 max_pcibus_number; +}; + +extern void hubdev_init_node(nodepda_t *, cnodeid_t); +extern void hub_error_init(struct hubdev_info *); +extern void ice_error_init(struct hubdev_info *); + + +#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/intr.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/intr.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,67 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_INTR_H +#define _ASM_IA64_SN_INTR_H + +#include <linux/rcupdate.h> +#include <asm/sn/types.h> + +#define SGI_UART_VECTOR 0xe9 + +/* Reserved IRQs : Note, not to exceed IA64_SN2_FIRST_DEVICE_VECTOR */ +#define SGI_XPC_ACTIVATE 0x30 +#define SGI_II_ERROR 0x31 +#define SGI_XBOW_ERROR 0x32 +#define SGI_PCIASIC_ERROR 0x33 +#define SGI_ACPI_SCI_INT 0x34 +#define SGI_TIOCA_ERROR 0x35 +#define SGI_TIO_ERROR 0x36 +#define SGI_TIOCX_ERROR 0x37 +#define SGI_MMTIMER_VECTOR 0x38 +#define SGI_XPC_NOTIFY 0xe7 + +#define IA64_SN2_FIRST_DEVICE_VECTOR 0x3c +#define IA64_SN2_LAST_DEVICE_VECTOR 0xe6 + +#define SN2_IRQ_RESERVED 0x1 +#define SN2_IRQ_CONNECTED 0x2 +#define SN2_IRQ_SHARED 0x4 + +// The SN PROM irq struct +struct sn_irq_info { + struct sn_irq_info *irq_next; /* deprecated DO NOT USE */ + short irq_nasid; /* Nasid IRQ is assigned to */ + int irq_slice; /* slice IRQ is assigned to */ + int irq_cpuid; /* kernel logical cpuid */ + int irq_irq; /* the IRQ number */ + int irq_int_bit; /* Bridge interrupt pin */ + /* <0 means MSI */ + u64 irq_xtalkaddr; /* xtalkaddr IRQ is sent to */ + int irq_bridge_type;/* pciio asic type (pciio.h) */ + void *irq_bridge; /* bridge generating irq */ + void *irq_pciioinfo; /* associated pciio_info_t */ + int irq_last_intr; /* For Shub lb lost intr WAR */ + int irq_cookie; /* unique cookie */ + int irq_flags; /* flags */ + int irq_share_cnt; /* num devices sharing IRQ */ + struct list_head list; /* list of sn_irq_info structs */ + struct rcu_head rcu; /* rcu callback list */ +}; + +extern void sn_send_IPI_phys(int, long, int, int); +extern u64 sn_intr_alloc(nasid_t, int, + struct sn_irq_info *, + int, nasid_t, int); +extern void sn_intr_free(nasid_t, int, struct sn_irq_info *); +extern struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *, nasid_t, int); +extern struct list_head **sn_irq_lh; + +#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector) + +#endif /* _ASM_IA64_SN_INTR_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/io.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/io.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,274 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_SN_IO_H +#define _ASM_SN_IO_H +#include <linux/compiler.h> +#include <asm/intrinsics.h> + +extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward definition */ +extern void __sn_mmiowb(void); /* Forward definition */ + +extern int num_cnodes; + +#define __sn_mf_a() ia64_mfa() + +extern void sn_dma_flush(unsigned long); + +#define __sn_inb ___sn_inb +#define __sn_inw ___sn_inw +#define __sn_inl ___sn_inl +#define __sn_outb ___sn_outb +#define __sn_outw ___sn_outw +#define __sn_outl ___sn_outl +#define __sn_readb ___sn_readb +#define __sn_readw ___sn_readw +#define __sn_readl ___sn_readl +#define __sn_readq ___sn_readq +#define __sn_readb_relaxed ___sn_readb_relaxed +#define __sn_readw_relaxed ___sn_readw_relaxed +#define __sn_readl_relaxed ___sn_readl_relaxed +#define __sn_readq_relaxed ___sn_readq_relaxed + +/* + * Convenience macros for setting/clearing bits using the above accessors + */ + +#define __sn_setq_relaxed(addr, val) \ + writeq((__sn_readq_relaxed(addr) | (val)), (addr)) +#define __sn_clrq_relaxed(addr, val) \ + writeq((__sn_readq_relaxed(addr) & ~(val)), (addr)) + +/* + * The following routines are SN Platform specific, called when + * a reference is made to inX/outX set macros. SN Platform + * inX set of macros ensures that Posted DMA writes on the + * Bridge is flushed. + * + * The routines should be self explainatory. + */ + +static inline unsigned int +___sn_inb (unsigned long port) +{ + volatile unsigned char *addr; + unsigned char ret = -1; + + if ((addr = sn_io_addr(port))) { + ret = *addr; + __sn_mf_a(); + sn_dma_flush((unsigned long)addr); + } + return ret; +} + +static inline unsigned int +___sn_inw (unsigned long port) +{ + volatile unsigned short *addr; + unsigned short ret = -1; + + if ((addr = sn_io_addr(port))) { + ret = *addr; + __sn_mf_a(); + sn_dma_flush((unsigned long)addr); + } + return ret; +} + +static inline unsigned int +___sn_inl (unsigned long port) +{ + volatile unsigned int *addr; + unsigned int ret = -1; + + if ((addr = sn_io_addr(port))) { + ret = *addr; + __sn_mf_a(); + sn_dma_flush((unsigned long)addr); + } + return ret; +} + +static inline void +___sn_outb (unsigned char val, unsigned long port) +{ + volatile unsigned char *addr; + + if ((addr = sn_io_addr(port))) { + *addr = val; + __sn_mmiowb(); + } +} + +static inline void +___sn_outw (unsigned short val, unsigned long port) +{ + volatile unsigned short *addr; + + if ((addr = sn_io_addr(port))) { + *addr = val; + __sn_mmiowb(); + } +} + +static inline void +___sn_outl (unsigned int val, unsigned long port) +{ + volatile unsigned int *addr; + + if ((addr = sn_io_addr(port))) { + *addr = val; + __sn_mmiowb(); + } +} + +/* + * The following routines are SN Platform specific, called when + * a reference is made to readX/writeX set macros. SN Platform + * readX set of macros ensures that Posted DMA writes on the + * Bridge is flushed. + * + * The routines should be self explainatory. 
+ */ + +static inline unsigned char +___sn_readb (const volatile void __iomem *addr) +{ + unsigned char val; + + val = *(volatile unsigned char __force *)addr; + __sn_mf_a(); + sn_dma_flush((unsigned long)addr); + return val; +} + +static inline unsigned short +___sn_readw (const volatile void __iomem *addr) +{ + unsigned short val; + + val = *(volatile unsigned short __force *)addr; + __sn_mf_a(); + sn_dma_flush((unsigned long)addr); + return val; +} + +static inline unsigned int +___sn_readl (const volatile void __iomem *addr) +{ + unsigned int val; + + val = *(volatile unsigned int __force *)addr; + __sn_mf_a(); + sn_dma_flush((unsigned long)addr); + return val; +} + +static inline unsigned long +___sn_readq (const volatile void __iomem *addr) +{ + unsigned long val; + + val = *(volatile unsigned long __force *)addr; + __sn_mf_a(); + sn_dma_flush((unsigned long)addr); + return val; +} + +/* + * For generic and SN2 kernels, we have a set of fast access + * PIO macros. These macros are provided on SN Platform + * because the normal inX and readX macros perform an + * additional task of flushing Post DMA request on the Bridge. + * + * These routines should be self explainatory. + */ + +static inline unsigned int +sn_inb_fast (unsigned long port) +{ + volatile unsigned char *addr = (unsigned char *)port; + unsigned char ret; + + ret = *addr; + __sn_mf_a(); + return ret; +} + +static inline unsigned int +sn_inw_fast (unsigned long port) +{ + volatile unsigned short *addr = (unsigned short *)port; + unsigned short ret; + + ret = *addr; + __sn_mf_a(); + return ret; +} + +static inline unsigned int +sn_inl_fast (unsigned long port) +{ + volatile unsigned int *addr = (unsigned int *)port; + unsigned int ret; + + ret = *addr; + __sn_mf_a(); + return ret; +} + +static inline unsigned char +___sn_readb_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned char __force *)addr; +} + +static inline unsigned short +___sn_readw_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned short __force *)addr; +} + +static inline unsigned int +___sn_readl_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned int __force *) addr; +} + +static inline unsigned long +___sn_readq_relaxed (const volatile void __iomem *addr) +{ + return *(volatile unsigned long __force *) addr; +} + +struct pci_dev; + +static inline int +sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan) +{ + + if (vchan > 1) { + return -1; + } + + if (!(*addr >> 32)) /* Using a mask here would be cleaner */ + return 0; /* but this generates better code */ + + if (vchan == 1) { + /* Set Bit 57 */ + *addr |= (1UL << 57); + } else { + /* Clear Bit 57 */ + *addr &= ~(1UL << 57); + } + + return 0; +} + +#endif /* _ASM_SN_IO_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/nodepda.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,83 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. 
+ */ +#ifndef _ASM_IA64_SN_NODEPDA_H +#define _ASM_IA64_SN_NODEPDA_H + + +#include <asm/semaphore.h> +#include <asm/irq.h> +#include <asm/sn/arch.h> +#include <asm/sn/intr.h> +#include <asm/sn/bte.h> + +/* + * NUMA Node-Specific Data structures are defined in this file. + * In particular, this is the location of the node PDA. + * A pointer to the right node PDA is saved in each CPU PDA. + */ + +/* + * Node-specific data structure. + * + * One of these structures is allocated on each node of a NUMA system. + * + * This structure provides a convenient way of keeping together + * all per-node data structures. + */ +struct phys_cpuid { + short nasid; + char subnode; + char slice; +}; + +struct nodepda_s { + void *pdinfo; /* Platform-dependent per-node info */ + + /* + * The BTEs on this node are shared by the local cpus + */ + struct bteinfo_s bte_if[MAX_BTES_PER_NODE]; /* Virtual Interface */ + struct timer_list bte_recovery_timer; + spinlock_t bte_recovery_lock; + + /* + * Array of pointers to the nodepdas for each node. + */ + struct nodepda_s *pernode_pdaindr[MAX_COMPACT_NODES]; + + /* + * Array of physical cpu identifiers. Indexed by cpuid. + */ + struct phys_cpuid phys_cpuid[NR_CPUS]; + spinlock_t ptc_lock ____cacheline_aligned_in_smp; +}; + +typedef struct nodepda_s nodepda_t; + +/* + * Access Functions for node PDA. + * Since there is one nodepda for each node, we need a convenient mechanism + * to access these nodepdas without cluttering code with #ifdefs. + * The next set of definitions provides this. + * Routines are expected to use + * + * sn_nodepda - to access node PDA for the node on which code is running + * NODEPDA(cnodeid) - to access node PDA for cnodeid + */ + +DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda); +#define sn_nodepda (__get_cpu_var(__sn_nodepda)) +#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid]) + +/* + * Check if given a compact node id the corresponding node has all the + * cpus disabled. + */ +#define is_headless_node(cnodeid) (nr_cpus_node(cnodeid) == 0) + +#endif /* _ASM_IA64_SN_NODEPDA_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/pcibr_provider.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,149 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992-1997,2000-2006 Silicon Graphics, Inc. All rights reserved. 
+ */ +#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H +#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H + +#include <asm/sn/intr.h> +#include <asm/sn/pcibus_provider_defs.h> + +/* Workarounds */ +#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */ + +#define BUSTYPE_MASK 0x1 + +/* Macros given a pcibus structure */ +#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK) +#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \ + asic == PCIIO_ASIC_TYPE_TIOCP) +#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC) + + +/* + * The different PCI Bridge types supported on the SGI Altix platforms + */ +#define PCIBR_BRIDGETYPE_UNKNOWN -1 +#define PCIBR_BRIDGETYPE_PIC 2 +#define PCIBR_BRIDGETYPE_TIOCP 3 + +/* + * Bridge 64bit Direct Map Attributes + */ +#define PCI64_ATTR_PREF (1ull << 59) +#define PCI64_ATTR_PREC (1ull << 58) +#define PCI64_ATTR_VIRTUAL (1ull << 57) +#define PCI64_ATTR_BAR (1ull << 56) +#define PCI64_ATTR_SWAP (1ull << 55) +#define PCI64_ATTR_VIRTUAL1 (1ull << 54) + +#define PCI32_LOCAL_BASE 0 +#define PCI32_MAPPED_BASE 0x40000000 +#define PCI32_DIRECT_BASE 0x80000000 + +#define IS_PCI32_MAPPED(x) ((u64)(x) < PCI32_DIRECT_BASE && \ + (u64)(x) >= PCI32_MAPPED_BASE) +#define IS_PCI32_DIRECT(x) ((u64)(x) >= PCI32_MAPPED_BASE) + + +/* + * Bridge PMU Address Transaltion Entry Attibutes + */ +#define PCI32_ATE_V (0x1 << 0) +#define PCI32_ATE_CO (0x1 << 1) +#define PCI32_ATE_PREC (0x1 << 2) +#define PCI32_ATE_MSI (0x1 << 2) +#define PCI32_ATE_PREF (0x1 << 3) +#define PCI32_ATE_BAR (0x1 << 4) +#define PCI32_ATE_ADDR_SHFT 12 + +#define MINIMAL_ATES_REQUIRED(addr, size) \ + (IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1)) + +#define MINIMAL_ATE_FLAG(addr, size) \ + (MINIMAL_ATES_REQUIRED((u64)addr, size) ? 1 : 0) + +/* bit 29 of the pci address is the SWAP bit */ +#define ATE_SWAPSHIFT 29 +#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT)) +#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT)) + +/* + * I/O page size + */ +#if PAGE_SIZE < 16384 +#define IOPFNSHIFT 12 /* 4K per mapped page */ +#else +#define IOPFNSHIFT 14 /* 16K per mapped page */ +#endif + +#define IOPGSIZE (1 << IOPFNSHIFT) +#define IOPG(x) ((x) >> IOPFNSHIFT) +#define IOPGOFF(x) ((x) & (IOPGSIZE-1)) + +#define PCIBR_DEV_SWAP_DIR (1ull << 19) +#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21) + +/* + * PMU resources. 
+ */ +struct ate_resource{ + u64 *ate; + u64 num_ate; + u64 lowest_free_index; +}; + +struct pcibus_info { + struct pcibus_bussoft pbi_buscommon; /* common header */ + u32 pbi_moduleid; + short pbi_bridge_type; + short pbi_bridge_mode; + + struct ate_resource pbi_int_ate_resource; + u64 pbi_int_ate_size; + + u64 pbi_dir_xbase; + char pbi_hub_xid; + + u64 pbi_devreg[8]; + + u32 pbi_valid_devices; + u32 pbi_enabled_devices; + + spinlock_t pbi_lock; +}; + +extern int pcibr_init_provider(void); +extern void *pcibr_bus_fixup(struct pcibus_bussoft *, struct pci_controller *); +extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t, int type); +extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t, int type); +extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int); + +/* + * prototypes for the bridge asic register access routines in pcibr_reg.c + */ +extern void pcireg_control_bit_clr(struct pcibus_info *, u64); +extern void pcireg_control_bit_set(struct pcibus_info *, u64); +extern u64 pcireg_tflush_get(struct pcibus_info *); +extern u64 pcireg_intr_status_get(struct pcibus_info *); +extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, u64); +extern void pcireg_intr_enable_bit_set(struct pcibus_info *, u64); +extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, u64); +extern void pcireg_force_intr_set(struct pcibus_info *, int); +extern u64 pcireg_wrb_flush_get(struct pcibus_info *, int); +extern void pcireg_int_ate_set(struct pcibus_info *, int, u64); +extern u64 __iomem * pcireg_int_ate_addr(struct pcibus_info *, int); +extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info); +extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info); +extern int pcibr_ate_alloc(struct pcibus_info *, int); +extern void pcibr_ate_free(struct pcibus_info *, int); +extern void ate_write(struct pcibus_info *, int, int, u64); +extern int sal_pcibr_slot_enable(struct pcibus_info *soft, int device, + void *resp); +extern int sal_pcibr_slot_disable(struct pcibus_info *soft, int device, + int action, void *resp); +extern u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus); +#endif diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/rw_mmr.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,28 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2002-2006 Silicon Graphics, Inc. All Rights Reserved. + */ +#ifndef _ASM_IA64_SN_RW_MMR_H +#define _ASM_IA64_SN_RW_MMR_H + + +/* + * This file that access MMRs via uncached physical addresses. + * pio_phys_read_mmr - read an MMR + * pio_phys_write_mmr - write an MMR + * pio_atomic_phys_write_mmrs - atomically write 1 or 2 MMRs with psr.ic=0 + * Second MMR will be skipped if address is NULL + * + * Addresses passed to these routines should be uncached physical addresses + * ie., 0x80000.... 
+ */ + + +extern long pio_phys_read_mmr(volatile long *mmr); +extern void pio_phys_write_mmr(volatile long *mmr, long val); +extern void pio_atomic_phys_write_mmrs(volatile long *mmr1, long val1, volatile long *mmr2, long val2); + +#endif /* _ASM_IA64_SN_RW_MMR_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/asm/sn/types.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/asm/sn/types.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,26 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 1999 by Ralf Baechle + */ +#ifndef _ASM_IA64_SN_TYPES_H +#define _ASM_IA64_SN_TYPES_H + +#include <linux/types.h> + +typedef unsigned long cpuid_t; +typedef signed short nasid_t; /* node id in numa-as-id space */ +typedef signed char partid_t; /* partition ID type */ +typedef unsigned int moduleid_t; /* user-visible module number type */ +typedef unsigned int cmoduleid_t; /* kernel compact module id type */ +typedef unsigned char slotid_t; /* slot (blade) within module */ +typedef unsigned char slabid_t; /* slab (asic) within slot */ +typedef u64 nic_t; +typedef unsigned long iopaddr_t; +typedef unsigned long paddr_t; +typedef short cnodeid_t; + +#endif /* _ASM_IA64_SN_TYPES_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/linux/README.origin --- a/xen/include/asm-ia64/linux-xen/linux/README.origin Tue Dec 12 15:25:02 2006 -0700 +++ b/xen/include/asm-ia64/linux-xen/linux/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -12,3 +12,8 @@ interrupt.h -> linux/include/linux/int # The files below are from Linux-2.6.16.33 oprofile.h -> linux/include/linux/oprofile.h + +# The files below are from Linux-2.6.19 +pci.h -> linux/include/linux/pci.h +kobject.h -> linux/include/linux/kobject.h +device.h -> linux/include/linux/device.h diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/linux/device.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/linux/device.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,473 @@ +/* + * device.h - generic, centralized driver model + * + * Copyright (c) 2001-2003 Patrick Mochel <mochel@xxxxxxxx> + * + * This file is released under the GPLv2 + * + * See Documentation/driver-model/ for more information. 
+ */ + +#ifndef _DEVICE_H_ +#define _DEVICE_H_ + +#include <linux/ioport.h> +#include <linux/kobject.h> +#include <linux/klist.h> +#include <linux/list.h> +#include <linux/compiler.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pm.h> +#include <asm/semaphore.h> +#include <asm/atomic.h> + +#define DEVICE_NAME_SIZE 50 +#define DEVICE_NAME_HALF __stringify(20) /* Less than half to accommodate slop */ +#define DEVICE_ID_SIZE 32 +#define BUS_ID_SIZE KOBJ_NAME_LEN + + +struct device; +struct device_driver; +struct class; +struct class_device; + +struct bus_type { + const char * name; + + struct subsystem subsys; + struct kset drivers; + struct kset devices; + struct klist klist_devices; + struct klist klist_drivers; + + struct bus_attribute * bus_attrs; + struct device_attribute * dev_attrs; + struct driver_attribute * drv_attrs; + + int (*match)(struct device * dev, struct device_driver * drv); + int (*uevent)(struct device *dev, char **envp, + int num_envp, char *buffer, int buffer_size); + int (*probe)(struct device * dev); + int (*remove)(struct device * dev); + void (*shutdown)(struct device * dev); + + int (*suspend)(struct device * dev, pm_message_t state); + int (*suspend_late)(struct device * dev, pm_message_t state); + int (*resume_early)(struct device * dev); + int (*resume)(struct device * dev); +}; + +extern int __must_check bus_register(struct bus_type * bus); +extern void bus_unregister(struct bus_type * bus); + +extern int __must_check bus_rescan_devices(struct bus_type * bus); + +/* iterator helpers for buses */ + +int bus_for_each_dev(struct bus_type * bus, struct device * start, void * data, + int (*fn)(struct device *, void *)); +struct device * bus_find_device(struct bus_type *bus, struct device *start, + void *data, int (*match)(struct device *, void *)); + +int __must_check bus_for_each_drv(struct bus_type *bus, + struct device_driver *start, void *data, + int (*fn)(struct device_driver *, void *)); + +/* driverfs interface for exporting bus attributes */ + +struct bus_attribute { + struct attribute attr; + ssize_t (*show)(struct bus_type *, char * buf); + ssize_t (*store)(struct bus_type *, const char * buf, size_t count); +}; + +#define BUS_ATTR(_name,_mode,_show,_store) \ +struct bus_attribute bus_attr_##_name = __ATTR(_name,_mode,_show,_store) + +extern int __must_check bus_create_file(struct bus_type *, + struct bus_attribute *); +extern void bus_remove_file(struct bus_type *, struct bus_attribute *); + +struct device_driver { + const char * name; + struct bus_type * bus; + + struct completion unloaded; + struct kobject kobj; + struct klist klist_devices; + struct klist_node knode_bus; + + struct module * owner; + + int (*probe) (struct device * dev); + int (*remove) (struct device * dev); + void (*shutdown) (struct device * dev); + int (*suspend) (struct device * dev, pm_message_t state); + int (*resume) (struct device * dev); + + unsigned int multithread_probe:1; +}; + + +extern int __must_check driver_register(struct device_driver * drv); +extern void driver_unregister(struct device_driver * drv); + +extern struct device_driver * get_driver(struct device_driver * drv); +extern void put_driver(struct device_driver * drv); +extern struct device_driver *driver_find(const char *name, struct bus_type *bus); +extern int driver_probe_done(void); + +/* driverfs interface for exporting driver attributes */ + +struct driver_attribute { + struct attribute attr; + ssize_t (*show)(struct device_driver *, char * buf); + ssize_t (*store)(struct 
device_driver *, const char * buf, size_t count); +}; + +#define DRIVER_ATTR(_name,_mode,_show,_store) \ +struct driver_attribute driver_attr_##_name = __ATTR(_name,_mode,_show,_store) + +extern int __must_check driver_create_file(struct device_driver *, + struct driver_attribute *); +extern void driver_remove_file(struct device_driver *, struct driver_attribute *); + +extern int __must_check driver_for_each_device(struct device_driver * drv, + struct device *start, void *data, + int (*fn)(struct device *, void *)); +struct device * driver_find_device(struct device_driver *drv, + struct device *start, void *data, + int (*match)(struct device *, void *)); + +/* + * device classes + */ +struct class { + const char * name; + struct module * owner; + + struct subsystem subsys; + struct list_head children; + struct list_head devices; + struct list_head interfaces; + struct semaphore sem; /* locks both the children and interfaces lists */ + + struct kobject *virtual_dir; + + struct class_attribute * class_attrs; + struct class_device_attribute * class_dev_attrs; + struct device_attribute * dev_attrs; + + int (*uevent)(struct class_device *dev, char **envp, + int num_envp, char *buffer, int buffer_size); + int (*dev_uevent)(struct device *dev, char **envp, int num_envp, + char *buffer, int buffer_size); + + void (*release)(struct class_device *dev); + void (*class_release)(struct class *class); + void (*dev_release)(struct device *dev); + + int (*suspend)(struct device *, pm_message_t state); + int (*resume)(struct device *); +}; + +extern int __must_check class_register(struct class *); +extern void class_unregister(struct class *); + + +struct class_attribute { + struct attribute attr; + ssize_t (*show)(struct class *, char * buf); + ssize_t (*store)(struct class *, const char * buf, size_t count); +}; + +#define CLASS_ATTR(_name,_mode,_show,_store) \ +struct class_attribute class_attr_##_name = __ATTR(_name,_mode,_show,_store) + +extern int __must_check class_create_file(struct class *, + const struct class_attribute *); +extern void class_remove_file(struct class *, const struct class_attribute *); + +struct class_device_attribute { + struct attribute attr; + ssize_t (*show)(struct class_device *, char * buf); + ssize_t (*store)(struct class_device *, const char * buf, size_t count); +}; + +#define CLASS_DEVICE_ATTR(_name,_mode,_show,_store) \ +struct class_device_attribute class_device_attr_##_name = \ + __ATTR(_name,_mode,_show,_store) + +extern int __must_check class_device_create_file(struct class_device *, + const struct class_device_attribute *); + +/** + * struct class_device - class devices + * @class: pointer to the parent class for this class device. This is required. + * @devt: for internal use by the driver core only. + * @node: for internal use by the driver core only. + * @kobj: for internal use by the driver core only. + * @devt_attr: for internal use by the driver core only. + * @groups: optional additional groups to be created + * @dev: if set, a symlink to the struct device is created in the sysfs + * directory for this struct class device. + * @class_data: pointer to whatever you want to store here for this struct + * class_device. Use class_get_devdata() and class_set_devdata() to get and + * set this pointer. + * @parent: pointer to a struct class_device that is the parent of this struct + * class_device. If NULL, this class_device will show up at the root of the + * struct class in sysfs (which is probably what you want to have happen.) 
+ * @release: pointer to a release function for this struct class_device. If + * set, this will be called instead of the class specific release function. + * Only use this if you want to override the default release function, like + * when you are nesting class_device structures. + * @uevent: pointer to a uevent function for this struct class_device. If + * set, this will be called instead of the class specific uevent function. + * Only use this if you want to override the default uevent function, like + * when you are nesting class_device structures. + */ +struct class_device { + struct list_head node; + + struct kobject kobj; + struct class * class; /* required */ + dev_t devt; /* dev_t, creates the sysfs "dev" */ + struct class_device_attribute *devt_attr; + struct class_device_attribute uevent_attr; + struct device * dev; /* not necessary, but nice to have */ + void * class_data; /* class-specific data */ + struct class_device *parent; /* parent of this child device, if there is one */ + struct attribute_group ** groups; /* optional groups */ + + void (*release)(struct class_device *dev); + int (*uevent)(struct class_device *dev, char **envp, + int num_envp, char *buffer, int buffer_size); + char class_id[BUS_ID_SIZE]; /* unique to this class */ +}; + +static inline void * +class_get_devdata (struct class_device *dev) +{ + return dev->class_data; +} + +static inline void +class_set_devdata (struct class_device *dev, void *data) +{ + dev->class_data = data; +} + + +extern int __must_check class_device_register(struct class_device *); +extern void class_device_unregister(struct class_device *); +extern void class_device_initialize(struct class_device *); +extern int __must_check class_device_add(struct class_device *); +extern void class_device_del(struct class_device *); + +extern int class_device_rename(struct class_device *, char *); + +extern struct class_device * class_device_get(struct class_device *); +extern void class_device_put(struct class_device *); + +extern void class_device_remove_file(struct class_device *, + const struct class_device_attribute *); +extern int __must_check class_device_create_bin_file(struct class_device *, + struct bin_attribute *); +extern void class_device_remove_bin_file(struct class_device *, + struct bin_attribute *); + +struct class_interface { + struct list_head node; + struct class *class; + + int (*add) (struct class_device *, struct class_interface *); + void (*remove) (struct class_device *, struct class_interface *); + int (*add_dev) (struct device *, struct class_interface *); + void (*remove_dev) (struct device *, struct class_interface *); +}; + +extern int __must_check class_interface_register(struct class_interface *); +extern void class_interface_unregister(struct class_interface *); + +extern struct class *class_create(struct module *owner, const char *name); +extern void class_destroy(struct class *cls); +extern struct class_device *class_device_create(struct class *cls, + struct class_device *parent, + dev_t devt, + struct device *device, + const char *fmt, ...) 
+ __attribute__((format(printf,5,6))); +extern void class_device_destroy(struct class *cls, dev_t devt); + +/* interface for exporting device attributes */ +struct device_attribute { + struct attribute attr; + ssize_t (*show)(struct device *dev, struct device_attribute *attr, + char *buf); + ssize_t (*store)(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +}; + +#define DEVICE_ATTR(_name,_mode,_show,_store) \ +struct device_attribute dev_attr_##_name = __ATTR(_name,_mode,_show,_store) + +extern int __must_check device_create_file(struct device *device, + struct device_attribute * entry); +extern void device_remove_file(struct device * dev, struct device_attribute * attr); +extern int __must_check device_create_bin_file(struct device *dev, + struct bin_attribute *attr); +extern void device_remove_bin_file(struct device *dev, + struct bin_attribute *attr); +struct device { + struct klist klist_children; + struct klist_node knode_parent; /* node in sibling list */ + struct klist_node knode_driver; + struct klist_node knode_bus; + struct device * parent; + + struct kobject kobj; + char bus_id[BUS_ID_SIZE]; /* position on parent bus */ + unsigned is_registered:1; + struct device_attribute uevent_attr; + struct device_attribute *devt_attr; + + struct semaphore sem; /* semaphore to synchronize calls to + * its driver. + */ + + struct bus_type * bus; /* type of bus device is on */ + struct device_driver *driver; /* which driver has allocated this + device */ + void *driver_data; /* data private to the driver */ + void *platform_data; /* Platform specific data, device + core doesn't touch it */ + void *firmware_data; /* Firmware specific data (e.g. ACPI, + BIOS data),reserved for device core*/ + struct dev_pm_info power; + + u64 *dma_mask; /* dma mask (if dma'able device) */ + u64 coherent_dma_mask;/* Like dma_mask, but for + alloc_coherent mappings as + not all hardware supports + 64 bit addresses for consistent + allocations such descriptors. */ + + struct list_head dma_pools; /* dma pools (if dma'ble) */ + + struct dma_coherent_mem *dma_mem; /* internal for coherent mem + override */ + + /* class_device migration path */ + struct list_head node; + struct class *class; /* optional*/ + dev_t devt; /* dev_t, creates the sysfs "dev" */ + struct attribute_group **groups; /* optional groups */ + + void (*release)(struct device * dev); +}; + +static inline void * +dev_get_drvdata (struct device *dev) +{ + return dev->driver_data; +} + +static inline void +dev_set_drvdata (struct device *dev, void *data) +{ + dev->driver_data = data; +} + +static inline int device_is_registered(struct device *dev) +{ + return dev->is_registered; +} + +/* + * High level routines for use by the bus drivers + */ +extern int __must_check device_register(struct device * dev); +extern void device_unregister(struct device * dev); +extern void device_initialize(struct device * dev); +extern int __must_check device_add(struct device * dev); +extern void device_del(struct device * dev); +extern int device_for_each_child(struct device *, void *, + int (*fn)(struct device *, void *)); +extern int device_rename(struct device *dev, char *new_name); + +/* + * Manual binding of a device to driver. See drivers/base/bus.c + * for information on use. 
+ */ +extern int __must_check device_bind_driver(struct device *dev); +extern void device_release_driver(struct device * dev); +extern int __must_check device_attach(struct device * dev); +extern int __must_check driver_attach(struct device_driver *drv); +extern int __must_check device_reprobe(struct device *dev); + +/* + * Easy functions for dynamically creating devices on the fly + */ +extern struct device *device_create(struct class *cls, struct device *parent, + dev_t devt, const char *fmt, ...) + __attribute__((format(printf,4,5))); +extern void device_destroy(struct class *cls, dev_t devt); + +extern int virtual_device_parent(struct device *dev); + +/* + * Platform "fixup" functions - allow the platform to have their say + * about devices and actions that the general device layer doesn't + * know about. + */ +/* Notify platform of device discovery */ +extern int (*platform_notify)(struct device * dev); + +extern int (*platform_notify_remove)(struct device * dev); + + +/** + * get_device - atomically increment the reference count for the device. + * + */ +extern struct device * get_device(struct device * dev); +extern void put_device(struct device * dev); + + +/* drivers/base/power/shutdown.c */ +extern void device_shutdown(void); + + +/* drivers/base/firmware.c */ +extern int __must_check firmware_register(struct subsystem *); +extern void firmware_unregister(struct subsystem *); + +/* debugging and troubleshooting/diagnostic helpers. */ +extern const char *dev_driver_string(struct device *dev); +#define dev_printk(level, dev, format, arg...) \ + printk(level "%s %s: " format , dev_driver_string(dev) , (dev)->bus_id , ## arg) + +#ifdef DEBUG +#define dev_dbg(dev, format, arg...) \ + dev_printk(KERN_DEBUG , dev , format , ## arg) +#else +#define dev_dbg(dev, format, arg...) do { (void)(dev); } while (0) +#endif + +#define dev_err(dev, format, arg...) \ + dev_printk(KERN_ERR , dev , format , ## arg) +#define dev_info(dev, format, arg...) \ + dev_printk(KERN_INFO , dev , format , ## arg) +#define dev_warn(dev, format, arg...) \ + dev_printk(KERN_WARNING , dev , format , ## arg) +#define dev_notice(dev, format, arg...) \ + dev_printk(KERN_NOTICE , dev , format , ## arg) + +/* Create alias, so I can be autoloaded. */ +#define MODULE_ALIAS_CHARDEV(major,minor) \ + MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) +#define MODULE_ALIAS_CHARDEV_MAJOR(major) \ + MODULE_ALIAS("char-major-" __stringify(major) "-*") +#endif /* _DEVICE_H_ */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/linux/kobject.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/linux/kobject.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,282 @@ +/* + * kobject.h - generic kernel object infrastructure. + * + * Copyright (c) 2002-2003 Patrick Mochel + * Copyright (c) 2002-2003 Open Source Development Labs + * + * This file is released under the GPLv2. + * + * + * Please read Documentation/kobject.txt before using the kobject + * interface, ESPECIALLY the parts about reference counts and object + * destructors. 
+ */ + +#ifndef _KOBJECT_H_ +#define _KOBJECT_H_ + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/sysfs.h> +#include <linux/compiler.h> +#include <linux/spinlock.h> +#include <linux/rwsem.h> +#include <linux/kref.h> +#include <linux/kernel.h> +#include <linux/wait.h> +#include <asm/atomic.h> + +#define KOBJ_NAME_LEN 20 +#define UEVENT_HELPER_PATH_LEN 256 + +/* path to the userspace helper executed on an event */ +extern char uevent_helper[]; + +/* counter to tag the uevent, read only except for the kobject core */ +extern u64 uevent_seqnum; + +/* the actions here must match the proper string in lib/kobject_uevent.c */ +typedef int __bitwise kobject_action_t; +enum kobject_action { + KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */ + KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */ + KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */ + KOBJ_MOUNT = (__force kobject_action_t) 0x04, /* mount event for block devices (broken) */ + KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ + KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ + KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ +}; + +struct kobject { + const char * k_name; + char name[KOBJ_NAME_LEN]; + struct kref kref; + struct list_head entry; + struct kobject * parent; + struct kset * kset; + struct kobj_type * ktype; + struct dentry * dentry; + wait_queue_head_t poll; +}; + +extern int kobject_set_name(struct kobject *, const char *, ...) + __attribute__((format(printf,2,3))); + +static inline const char * kobject_name(const struct kobject * kobj) +{ + return kobj->k_name; +} + +extern void kobject_init(struct kobject *); +extern void kobject_cleanup(struct kobject *); + +extern int __must_check kobject_add(struct kobject *); +extern void kobject_del(struct kobject *); + +extern int __must_check kobject_rename(struct kobject *, const char *new_name); + +extern int __must_check kobject_register(struct kobject *); +extern void kobject_unregister(struct kobject *); + +extern struct kobject * kobject_get(struct kobject *); +extern void kobject_put(struct kobject *); + +extern struct kobject *kobject_add_dir(struct kobject *, const char *); + +extern char * kobject_get_path(struct kobject *, gfp_t); + +struct kobj_type { + void (*release)(struct kobject *); + struct sysfs_ops * sysfs_ops; + struct attribute ** default_attrs; +}; + + +/** + * kset - a set of kobjects of a specific type, belonging + * to a specific subsystem. + * + * All kobjects of a kset should be embedded in an identical + * type. This type may have a descriptor, which the kset points + * to. This allows there to exist sets of objects of the same + * type in different subsystems. + * + * A subsystem does not have to be a list of only one type + * of object; multiple ksets can belong to one subsystem. All + * ksets of a subsystem share the subsystem's lock. + * + * Each kset can support specific event variables; it can + * supress the event generation or add subsystem specific + * variables carried with the event. 
+ */ +struct kset_uevent_ops { + int (*filter)(struct kset *kset, struct kobject *kobj); + const char *(*name)(struct kset *kset, struct kobject *kobj); + int (*uevent)(struct kset *kset, struct kobject *kobj, char **envp, + int num_envp, char *buffer, int buffer_size); +}; + +struct kset { + struct subsystem * subsys; + struct kobj_type * ktype; + struct list_head list; + spinlock_t list_lock; + struct kobject kobj; + struct kset_uevent_ops * uevent_ops; +}; + + +extern void kset_init(struct kset * k); +extern int __must_check kset_add(struct kset * k); +extern int __must_check kset_register(struct kset * k); +extern void kset_unregister(struct kset * k); + +static inline struct kset * to_kset(struct kobject * kobj) +{ + return kobj ? container_of(kobj,struct kset,kobj) : NULL; +} + +static inline struct kset * kset_get(struct kset * k) +{ + return k ? to_kset(kobject_get(&k->kobj)) : NULL; +} + +static inline void kset_put(struct kset * k) +{ + kobject_put(&k->kobj); +} + +static inline struct kobj_type * get_ktype(struct kobject * k) +{ + if (k->kset && k->kset->ktype) + return k->kset->ktype; + else + return k->ktype; +} + +extern struct kobject * kset_find_obj(struct kset *, const char *); + + +/** + * Use this when initializing an embedded kset with no other + * fields to initialize. + */ +#define set_kset_name(str) .kset = { .kobj = { .name = str } } + + + +struct subsystem { + struct kset kset; + struct rw_semaphore rwsem; +}; + +#define decl_subsys(_name,_type,_uevent_ops) \ +struct subsystem _name##_subsys = { \ + .kset = { \ + .kobj = { .name = __stringify(_name) }, \ + .ktype = _type, \ + .uevent_ops =_uevent_ops, \ + } \ +} +#define decl_subsys_name(_varname,_name,_type,_uevent_ops) \ +struct subsystem _varname##_subsys = { \ + .kset = { \ + .kobj = { .name = __stringify(_name) }, \ + .ktype = _type, \ + .uevent_ops =_uevent_ops, \ + } \ +} + +/* The global /sys/kernel/ subsystem for people to chain off of */ +extern struct subsystem kernel_subsys; +/* The global /sys/hypervisor/ subsystem */ +extern struct subsystem hypervisor_subsys; + +/** + * Helpers for setting the kset of registered objects. + * Often, a registered object belongs to a kset embedded in a + * subsystem. These do no magic, just make the resulting code + * easier to follow. + */ + +/** + * kobj_set_kset_s(obj,subsys) - set kset for embedded kobject. + * @obj: ptr to some object type. + * @subsys: a subsystem object (not a ptr). + * + * Can be used for any object type with an embedded ->kobj. + */ + +#define kobj_set_kset_s(obj,subsys) \ + (obj)->kobj.kset = &(subsys).kset + +/** + * kset_set_kset_s(obj,subsys) - set kset for embedded kset. + * @obj: ptr to some object type. + * @subsys: a subsystem object (not a ptr). + * + * Can be used for any object type with an embedded ->kset. + * Sets the kset of @obj's embedded kobject (via its embedded + * kset) to @subsys.kset. This makes @obj a member of that + * kset. + */ + +#define kset_set_kset_s(obj,subsys) \ + (obj)->kset.kobj.kset = &(subsys).kset + +/** + * subsys_set_kset(obj,subsys) - set kset for subsystem + * @obj: ptr to some object type. + * @subsys: a subsystem object (not a ptr). + * + * Can be used for any object type with an embedded ->subsys. + * Sets the kset of @obj's kobject to @subsys.kset. This makes + * the object a member of that kset. 
+ */ + +#define subsys_set_kset(obj,_subsys) \ + (obj)->subsys.kset.kobj.kset = &(_subsys).kset + +extern void subsystem_init(struct subsystem *); +extern int __must_check subsystem_register(struct subsystem *); +extern void subsystem_unregister(struct subsystem *); + +static inline struct subsystem * subsys_get(struct subsystem * s) +{ + return s ? container_of(kset_get(&s->kset),struct subsystem,kset) : NULL; +} + +static inline void subsys_put(struct subsystem * s) +{ + kset_put(&s->kset); +} + +struct subsys_attribute { + struct attribute attr; + ssize_t (*show)(struct subsystem *, char *); + ssize_t (*store)(struct subsystem *, const char *, size_t); +}; + +extern int __must_check subsys_create_file(struct subsystem * , + struct subsys_attribute *); + +#if defined(CONFIG_HOTPLUG) +void kobject_uevent(struct kobject *kobj, enum kobject_action action); + +int add_uevent_var(char **envp, int num_envp, int *cur_index, + char *buffer, int buffer_size, int *cur_len, + const char *format, ...) + __attribute__((format (printf, 7, 8))); +#else +static inline void kobject_uevent(struct kobject *kobj, enum kobject_action action) { } + +static inline int add_uevent_var(char **envp, int num_envp, int *cur_index, + char *buffer, int buffer_size, int *cur_len, + const char *format, ...) +{ return 0; } +#endif + +#endif /* __KERNEL__ */ +#endif /* _KOBJECT_H_ */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux-xen/linux/pci.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux-xen/linux/pci.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,817 @@ +/* + * pci.h + * + * PCI defines and function prototypes + * Copyright 1994, Drew Eckhardt + * Copyright 1997--1999 Martin Mares <mj@xxxxxx> + * + * For more information, please consult the following manuals (look at + * http://www.pcisig.com/ for how to get them): + * + * PCI BIOS Specification + * PCI Local Bus Specification + * PCI to PCI Bridge Specification + * PCI System Design Guide + */ + +#ifndef LINUX_PCI_H +#define LINUX_PCI_H + +/* Include the pci register defines */ +#include <linux/pci_regs.h> + +/* Include the ID list */ +#include <linux/pci_ids.h> + +/* + * The PCI interface treats multi-function devices as independent + * devices. The slot/function address of each device is encoded + * in a single byte as follows: + * + * 7:3 = slot + * 2:0 = function + */ +#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) +#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) +#define PCI_FUNC(devfn) ((devfn) & 0x07) + +/* Ioctls for /proc/bus/pci/X/Y nodes. */ +#define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8) +#define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */ +#define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */ +#define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ +#define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ + +#ifdef __KERNEL__ + +#include <linux/mod_devicetable.h> + +#include <linux/types.h> +#include <linux/ioport.h> +#include <linux/list.h> +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/device.h> + +/* File state for mmap()s on /proc/bus/pci/X/Y */ +enum pci_mmap_state { + pci_mmap_io, + pci_mmap_mem +}; + +/* This defines the direction arg to the DMA mapping routines. 
*/ +#define PCI_DMA_BIDIRECTIONAL 0 +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#define PCI_DMA_NONE 3 + +#define DEVICE_COUNT_COMPATIBLE 4 +#define DEVICE_COUNT_RESOURCE 12 + +typedef int __bitwise pci_power_t; + +#define PCI_D0 ((pci_power_t __force) 0) +#define PCI_D1 ((pci_power_t __force) 1) +#define PCI_D2 ((pci_power_t __force) 2) +#define PCI_D3hot ((pci_power_t __force) 3) +#define PCI_D3cold ((pci_power_t __force) 4) +#define PCI_UNKNOWN ((pci_power_t __force) 5) +#define PCI_POWER_ERROR ((pci_power_t __force) -1) + +/** The pci_channel state describes connectivity between the CPU and + * the pci device. If some PCI bus between here and the pci device + * has crashed or locked up, this info is reflected here. + */ +typedef unsigned int __bitwise pci_channel_state_t; + +enum pci_channel_state { + /* I/O channel is in normal state */ + pci_channel_io_normal = (__force pci_channel_state_t) 1, + + /* I/O to channel is blocked */ + pci_channel_io_frozen = (__force pci_channel_state_t) 2, + + /* PCI card is dead */ + pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, +}; + +typedef unsigned short __bitwise pci_bus_flags_t; +enum pci_bus_flags { + PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, +}; + +struct pci_cap_saved_state { + struct hlist_node next; + char cap_nr; + u32 data[0]; +}; + +/* + * The pci_dev structure is used to describe PCI devices. + */ +struct pci_dev { + struct list_head global_list; /* node in list of all PCI devices */ + struct list_head bus_list; /* node in per-bus list */ + struct pci_bus *bus; /* bus this device is on */ + struct pci_bus *subordinate; /* bus this device bridges to */ + + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ + + unsigned int devfn; /* encoded device & function index */ + unsigned short vendor; + unsigned short device; + unsigned short subsystem_vendor; + unsigned short subsystem_device; + unsigned int class; /* 3 bytes: (base,sub,prog-if) */ + u8 hdr_type; /* PCI header type (`multi' flag masked out) */ + u8 rom_base_reg; /* which config register controls the ROM */ + u8 pin; /* which interrupt pin this device uses */ + + struct pci_driver *driver; /* which driver has allocated this device */ + u64 dma_mask; /* Mask of the bits of bus address this + device implements. Normally this is + 0xffffffff. You only need to change + this if your device has broken DMA + or supports 64-bit transfers. */ + + pci_power_t current_state; /* Current operating state. In ACPI-speak, + this is D0-D3, D0 being fully functional, + and D3 being off. */ + + pci_channel_state_t error_state; /* current connectivity state */ + struct device dev; /* Generic device interface */ + + /* device is compatible with these IDs */ + unsigned short vendor_compatible[DEVICE_COUNT_COMPATIBLE]; + unsigned short device_compatible[DEVICE_COUNT_COMPATIBLE]; + + int cfg_size; /* Size of configuration space */ + + /* + * Instead of touching interrupt line and base address registers + * directly, use the values stored here. They might be different! 
+ */ + unsigned int irq; + struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ + + /* These fields are used by common fixups */ + unsigned int transparent:1; /* Transparent PCI bridge */ + unsigned int multifunction:1;/* Part of multi-function device */ + /* keep track of device state */ + unsigned int is_enabled:1; /* pci_enable_device has been called */ + unsigned int is_busmaster:1; /* device is busmaster */ + unsigned int no_msi:1; /* device may not use msi */ + unsigned int no_d1d2:1; /* only allow d0 or d3 */ + unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ + unsigned int broken_parity_status:1; /* Device generates false positive parity */ + unsigned int msi_enabled:1; + unsigned int msix_enabled:1; + + u32 saved_config_space[16]; /* config space saved at suspend time */ + struct hlist_head saved_cap_space; + struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ + int rom_attr_enabled; /* has display of the rom attribute been enabled? */ + struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ +}; + +#define pci_dev_g(n) list_entry(n, struct pci_dev, global_list) +#define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) +#define to_pci_dev(n) container_of(n, struct pci_dev, dev) +#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) + +static inline struct pci_cap_saved_state *pci_find_saved_cap( + struct pci_dev *pci_dev,char cap) +{ + struct pci_cap_saved_state *tmp; + struct hlist_node *pos; + + hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) { + if (tmp->cap_nr == cap) + return tmp; + } + return NULL; +} + +static inline void pci_add_saved_cap(struct pci_dev *pci_dev, + struct pci_cap_saved_state *new_cap) +{ + hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); +} + +static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap) +{ + hlist_del(&cap->next); +} + +/* + * For PCI devices, the region numbers are assigned this way: + * + * 0-5 standard PCI regions + * 6 expansion ROM + * 7-10 bridges: address space assigned to buses behind the bridge + */ + +#define PCI_ROM_RESOURCE 6 +#define PCI_BRIDGE_RESOURCES 7 +#define PCI_NUM_RESOURCES 11 + +#ifndef PCI_BUS_NUM_RESOURCES +#define PCI_BUS_NUM_RESOURCES 8 +#endif + +#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ + +struct pci_bus { + struct list_head node; /* node in list of buses */ + struct pci_bus *parent; /* parent bus this bridge is on */ + struct list_head children; /* list of child buses */ + struct list_head devices; /* list of devices on this bus */ + struct pci_dev *self; /* bridge device as seen by parent */ + struct resource *resource[PCI_BUS_NUM_RESOURCES]; + /* address space routed to this bus */ + + struct pci_ops *ops; /* configuration access functions */ + void *sysdata; /* hook for sys-specific extension */ + struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ + + unsigned char number; /* bus number */ + unsigned char primary; /* number of primary bridge */ + unsigned char secondary; /* number of secondary bridge */ + unsigned char subordinate; /* max number of subordinate buses */ + + char name[48]; + + unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ + pci_bus_flags_t bus_flags; /* Inherited by child busses */ + struct device *bridge; + struct class_device class_dev; + struct bin_attribute *legacy_io; /* legacy I/O 
for this bus */ + struct bin_attribute *legacy_mem; /* legacy mem */ +}; + +#define pci_bus_b(n) list_entry(n, struct pci_bus, node) +#define to_pci_bus(n) container_of(n, struct pci_bus, class_dev) + +/* + * Error values that may be returned by PCI functions. + */ +#define PCIBIOS_SUCCESSFUL 0x00 +#define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 +#define PCIBIOS_BAD_VENDOR_ID 0x83 +#define PCIBIOS_DEVICE_NOT_FOUND 0x86 +#define PCIBIOS_BAD_REGISTER_NUMBER 0x87 +#define PCIBIOS_SET_FAILED 0x88 +#define PCIBIOS_BUFFER_TOO_SMALL 0x89 + +/* Low-level architecture-dependent routines */ + +struct pci_ops { + int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); + int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); +}; + +struct pci_raw_ops { + int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val); + int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val); +}; + +extern struct pci_raw_ops *raw_pci_ops; + +struct pci_bus_region { + unsigned long start; + unsigned long end; +}; + +struct pci_dynids { + spinlock_t lock; /* protects list, index */ + struct list_head list; /* for IDs added at runtime */ + unsigned int use_driver_data:1; /* pci_driver->driver_data is used */ +}; + +/* ---------------------------------------------------------------- */ +/** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides + * a set fof callbacks in struct pci_error_handlers, then that device driver + * will be notified of PCI bus errors, and will be driven to recovery + * when an error occurs. + */ + +typedef unsigned int __bitwise pci_ers_result_t; + +enum pci_ers_result { + /* no result/none/not supported in device driver */ + PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, + + /* Device driver can recover without slot reset */ + PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, + + /* Device driver wants slot to be reset. 
*/ + PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, + + /* Device has completely failed, is unrecoverable */ + PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, + + /* Device driver is fully recovered and operational */ + PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, +}; + +/* PCI bus error event callbacks */ +struct pci_error_handlers +{ + /* PCI bus error detected on this device */ + pci_ers_result_t (*error_detected)(struct pci_dev *dev, + enum pci_channel_state error); + + /* MMIO has been re-enabled, but not DMA */ + pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); + + /* PCI Express link has been reset */ + pci_ers_result_t (*link_reset)(struct pci_dev *dev); + + /* PCI slot has been reset */ + pci_ers_result_t (*slot_reset)(struct pci_dev *dev); + + /* Device driver may resume normal operations */ + void (*resume)(struct pci_dev *dev); +}; + +/* ---------------------------------------------------------------- */ + +struct module; +struct pci_driver { + struct list_head node; + char *name; + const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ + int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ + void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ + int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ + int (*suspend_late) (struct pci_dev *dev, pm_message_t state); + int (*resume_early) (struct pci_dev *dev); + int (*resume) (struct pci_dev *dev); /* Device woken up */ + int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */ + void (*shutdown) (struct pci_dev *dev); + + struct pci_error_handlers *err_handler; + struct device_driver driver; + struct pci_dynids dynids; + + int multithread_probe; +}; + +#define to_pci_driver(drv) container_of(drv,struct pci_driver, driver) + +/** + * PCI_DEVICE - macro used to describe a specific pci device + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID. + */ +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID + +/** + * PCI_DEVICE_CLASS - macro used to describe a specific pci device class + * @dev_class: the class, subclass, prog-if triple for this device + * @dev_class_mask: the class mask for this device + * + * This macro is used to create a struct pci_device_id that matches a + * specific PCI class. The vendor, device, subvendor, and subdevice + * fields will be set to PCI_ANY_ID. + */ +#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ + .class = (dev_class), .class_mask = (dev_class_mask), \ + .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID + +/* + * pci_module_init is obsolete, this stays here till we fix up all usages of it + * in the tree. + */ +#define pci_module_init pci_register_driver + +/* these external functions are only available when PCI support is enabled */ +#ifdef CONFIG_PCI + +extern struct bus_type pci_bus_type; + +/* Do NOT directly access these two variables, unless you are arch specific pci + * code, or pci core code. 
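For readers less familiar with this interface, a minimal driver skeleton shows how the pieces above fit together: an id table built with PCI_DEVICE(), a probe()/remove() pair, and a struct pci_driver handed to pci_register_driver() (declared later in this header). This is a hedged sketch; the 0x1234/0x5678 IDs and all example_* names are placeholders, not a real device.

    static struct pci_device_id example_ids[] = {
            { PCI_DEVICE(0x1234, 0x5678) },         /* placeholder vendor/device */
            { 0, }
    };

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int err = pci_enable_device(pdev);
            if (err)
                    return err;
            err = pci_request_regions(pdev, "example");
            if (err)
                    pci_disable_device(pdev);
            return err;
    }

    static void example_remove(struct pci_dev *pdev)
    {
            pci_release_regions(pdev);
            pci_disable_device(pdev);
    }

    static struct pci_driver example_driver = {
            .name     = "example",
            .id_table = example_ids,
            .probe    = example_probe,
            .remove   = example_remove,
    };
    /* registered at init time with pci_register_driver(&example_driver) */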
*/ +extern struct list_head pci_root_buses; /* list of all known PCI buses */ +extern struct list_head pci_devices; /* list of all devices */ + +void pcibios_fixup_bus(struct pci_bus *); +int __must_check pcibios_enable_device(struct pci_dev *, int mask); +char *pcibios_setup (char *str); + +/* Used only when drivers/pci/setup.c is used */ +void pcibios_align_resource(void *, struct resource *, resource_size_t, + resource_size_t); +void pcibios_update_irq(struct pci_dev *, int irq); + +/* Generic PCI functions used internally */ + +extern struct pci_bus *pci_find_bus(int domain, int busnr); +void pci_bus_add_devices(struct pci_bus *bus); +struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); +static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) +{ + struct pci_bus *root_bus; + root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata); + if (root_bus) + pci_bus_add_devices(root_bus); + return root_bus; +} +struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); +struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); +int pci_scan_slot(struct pci_bus *bus, int devfn); +struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn); +void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); +unsigned int pci_scan_child_bus(struct pci_bus *bus); +int __must_check pci_bus_add_device(struct pci_dev *dev); +void pci_read_bridge_bases(struct pci_bus *child); +struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); +int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); +extern struct pci_dev *pci_dev_get(struct pci_dev *dev); +extern void pci_dev_put(struct pci_dev *dev); +extern void pci_remove_bus(struct pci_bus *b); +extern void pci_remove_bus_device(struct pci_dev *dev); +extern void pci_stop_bus_device(struct pci_dev *dev); +void pci_setup_cardbus(struct pci_bus *bus); +extern void pci_sort_breadthfirst(void); + +/* Generic PCI functions exported to card drivers */ + +struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from); +struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from); +struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn); +int pci_find_capability (struct pci_dev *dev, int cap); +int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap); +int pci_find_ext_capability (struct pci_dev *dev, int cap); +struct pci_bus *pci_find_next_bus(const struct pci_bus *from); + +struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, + struct pci_dev *from); +struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device, + struct pci_dev *from); + +struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device, + unsigned int ss_vendor, unsigned int ss_device, + struct pci_dev *from); +struct pci_dev *pci_get_slot (struct pci_bus *bus, unsigned int devfn); +struct pci_dev *pci_get_bus_and_slot (unsigned int bus, unsigned int devfn); +struct pci_dev *pci_get_class (unsigned int class, struct pci_dev *from); +int pci_dev_present(const struct pci_device_id *ids); + +int pci_bus_read_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 *val); +int pci_bus_read_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 *val); +int pci_bus_read_config_dword (struct pci_bus 
*bus, unsigned int devfn, int where, u32 *val); +int pci_bus_write_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 val); +int pci_bus_write_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 val); +int pci_bus_write_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 val); + +static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val) +{ + return pci_bus_read_config_byte (dev->bus, dev->devfn, where, val); +} +static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val) +{ + return pci_bus_read_config_word (dev->bus, dev->devfn, where, val); +} +static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val) +{ + return pci_bus_read_config_dword (dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val) +{ + return pci_bus_write_config_byte (dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val) +{ + return pci_bus_write_config_word (dev->bus, dev->devfn, where, val); +} +static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val) +{ + return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val); +} + +int __must_check pci_enable_device(struct pci_dev *dev); +int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask); +void pci_disable_device(struct pci_dev *dev); +void pci_set_master(struct pci_dev *dev); +#define HAVE_PCI_SET_MWI +int __must_check pci_set_mwi(struct pci_dev *dev); +void pci_clear_mwi(struct pci_dev *dev); +void pci_intx(struct pci_dev *dev, int enable); +int pci_set_dma_mask(struct pci_dev *dev, u64 mask); +int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); +void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); +int __must_check pci_assign_resource(struct pci_dev *dev, int i); +int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i); +void pci_restore_bars(struct pci_dev *dev); + +/* ROM control related routines */ +void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); +void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size); +void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); +void pci_remove_rom(struct pci_dev *pdev); + +/* Power management related routines */ +int pci_save_state(struct pci_dev *dev); +int pci_restore_state(struct pci_dev *dev); +int pci_set_power_state(struct pci_dev *dev, pci_power_t state); +pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); +int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); + +/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ +void pci_bus_assign_resources(struct pci_bus *bus); +void pci_bus_size_bridges(struct pci_bus *bus); +int pci_claim_resource(struct pci_dev *, int); +void pci_assign_unassigned_resources(void); +void pdev_enable_device(struct pci_dev *); +void pdev_sort_resources(struct pci_dev *, struct resource_list *); +void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), + int (*)(struct pci_dev *, u8, u8)); +#define HAVE_PCI_REQ_REGIONS 2 +int __must_check pci_request_regions(struct pci_dev *, const char *); +void pci_release_regions(struct pci_dev *); +int __must_check pci_request_region(struct pci_dev *, int, const char *); +void pci_release_region(struct pci_dev *, int); + +/* drivers/pci/bus.c */ +int __must_check pci_bus_alloc_resource(struct pci_bus *bus, + struct 
resource *res, resource_size_t size, + resource_size_t align, resource_size_t min, + unsigned int type_mask, + void (*alignf)(void *, struct resource *, + resource_size_t, resource_size_t), + void *alignf_data); +void pci_enable_bridges(struct pci_bus *bus); + +/* Proper probing supporting hot-pluggable devices */ +int __must_check __pci_register_driver(struct pci_driver *, struct module *); +static inline int __must_check pci_register_driver(struct pci_driver *driver) +{ + return __pci_register_driver(driver, THIS_MODULE); +} + +void pci_unregister_driver(struct pci_driver *); +void pci_remove_behind_bridge(struct pci_dev *); +struct pci_driver *pci_dev_driver(const struct pci_dev *); +const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev); +const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev); +int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass); + +void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), + void *userdata); +int pci_cfg_space_size(struct pci_dev *dev); +unsigned char pci_bus_max_busnr(struct pci_bus* bus); + +/* kmem_cache style wrapper around pci_alloc_consistent() */ + +#include <linux/dmapool.h> + +#define pci_pool dma_pool +#define pci_pool_create(name, pdev, size, align, allocation) \ + dma_pool_create(name, &pdev->dev, size, align, allocation) +#define pci_pool_destroy(pool) dma_pool_destroy(pool) +#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) +#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) + +enum pci_dma_burst_strategy { + PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, + strategy_parameter is N/A */ + PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter + byte boundaries */ + PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of + strategy_parameter byte boundaries */ +}; + +#if defined(CONFIG_ISA) || defined(CONFIG_EISA) +extern struct pci_dev *isa_bridge; +#endif + +struct msix_entry { + u16 vector; /* kernel uses to write allocated vector */ + u16 entry; /* driver uses to specify entry, OS writes */ +}; + + +#ifndef CONFIG_PCI_MSI +static inline void pci_scan_msi_device(struct pci_dev *dev) {} +static inline int pci_enable_msi(struct pci_dev *dev) {return -1;} +static inline void pci_disable_msi(struct pci_dev *dev) {} +static inline int pci_enable_msix(struct pci_dev* dev, + struct msix_entry *entries, int nvec) {return -1;} +static inline void pci_disable_msix(struct pci_dev *dev) {} +static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} +#else +extern void pci_scan_msi_device(struct pci_dev *dev); +extern int pci_enable_msi(struct pci_dev *dev); +extern void pci_disable_msi(struct pci_dev *dev); +extern int pci_enable_msix(struct pci_dev* dev, + struct msix_entry *entries, int nvec); +extern void pci_disable_msix(struct pci_dev *dev); +extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); +#endif + +#ifdef CONFIG_HT_IRQ +/* The functions a driver should call */ +int ht_create_irq(struct pci_dev *dev, int idx); +void ht_destroy_irq(unsigned int irq); +#endif /* CONFIG_HT_IRQ */ + +extern void pci_block_user_cfg_access(struct pci_dev *dev); +extern void pci_unblock_user_cfg_access(struct pci_dev *dev); + +/* + * PCI domain support. Sometimes called PCI segment (eg by ACPI), + * a PCI domain is defined to be a set of PCI busses which share + * configuration space. 
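The MSI entry points listed above are typically used with a fallback: try pci_enable_msi(), and if it fails (or when CONFIG_PCI_MSI is off, where the stub always returns -1) keep using the legacy line interrupt already stored in dev->irq. A hedged sketch; example_setup_irq is invented and the eventual request_irq() call is elided:

    static int example_setup_irq(struct pci_dev *pdev)
    {
            if (pci_enable_msi(pdev) == 0)
                    dev_info(&pdev->dev, "using MSI, irq %u\n", pdev->irq);
            else
                    dev_info(&pdev->dev, "MSI unavailable, using INTx irq %u\n",
                             pdev->irq);

            /* request_irq(pdev->irq, ...) would follow here */
            return 0;
    }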
+ */ +#ifndef CONFIG_PCI_DOMAINS +static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } +static inline int pci_proc_domain(struct pci_bus *bus) +{ + return 0; +} +#endif + +#else /* CONFIG_PCI is not enabled */ + +/* + * If the system does not have PCI, clearly these return errors. Define + * these as simple inline functions to avoid hair in drivers. + */ + +#define _PCI_NOP(o,s,t) \ + static inline int pci_##o##_config_##s (struct pci_dev *dev, int where, t val) \ + { return PCIBIOS_FUNC_NOT_SUPPORTED; } +#define _PCI_NOP_ALL(o,x) _PCI_NOP(o,byte,u8 x) \ + _PCI_NOP(o,word,u16 x) \ + _PCI_NOP(o,dword,u32 x) +_PCI_NOP_ALL(read, *) +_PCI_NOP_ALL(write,) + +static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from) +{ return NULL; } + +static inline struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn) +{ return NULL; } + +static inline struct pci_dev *pci_get_device(unsigned int vendor, + unsigned int device, struct pci_dev *from) +{ return NULL; } + +static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor, + unsigned int device, struct pci_dev *from) +{ return NULL; } + +static inline struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device, +unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from) +{ return NULL; } + +static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) +{ return NULL; } + +#define pci_dev_present(ids) (0) +#define pci_dev_put(dev) do { } while (0) + +static inline void pci_set_master(struct pci_dev *dev) { } +static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } +static inline void pci_disable_device(struct pci_dev *dev) { } +static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } +static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;} +static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;} +static inline int pci_register_driver(struct pci_driver *drv) { return 0;} +static inline void pci_unregister_driver(struct pci_driver *drv) { } +static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; } +static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; } +static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; } +static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; } + +/* Power management related routines */ +static inline int pci_save_state(struct pci_dev *dev) { return 0; } +static inline int pci_restore_state(struct pci_dev *dev) { return 0; } +static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; } +static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; } +static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; } + +#define isa_bridge ((struct pci_dev *)NULL) + +#define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) + +static inline void pci_block_user_cfg_access(struct pci_dev *dev) { } +static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { } + +#endif /* CONFIG_PCI */ + +/* Include architecture-dependent settings and functions */ + +#include <asm/pci.h> + +/* these helpers provide future and backwards compatibility + * for accessing popular PCI BAR info */ 
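The pci_resource_*() helpers defined immediately below are the intended way for drivers to obtain BAR addresses, lengths and flags instead of reading the base address registers directly. As an illustration (the BAR number and function name are hypothetical):

    static int example_check_bar0(struct pci_dev *pdev)
    {
            resource_size_t start = pci_resource_start(pdev, 0);
            resource_size_t len   = pci_resource_len(pdev, 0);

            if (!len || !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
                    return -ENODEV;         /* BAR 0 absent or not memory-mapped */

            /* start/len would normally be handed to ioremap() here */
            dev_info(&pdev->dev, "BAR0 at %#llx, %llu bytes\n",
                     (unsigned long long)start, (unsigned long long)len);
            return 0;
    }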
+#define pci_resource_start(dev,bar) ((dev)->resource[(bar)].start) +#define pci_resource_end(dev,bar) ((dev)->resource[(bar)].end) +#define pci_resource_flags(dev,bar) ((dev)->resource[(bar)].flags) +#define pci_resource_len(dev,bar) \ + ((pci_resource_start((dev),(bar)) == 0 && \ + pci_resource_end((dev),(bar)) == \ + pci_resource_start((dev),(bar))) ? 0 : \ + \ + (pci_resource_end((dev),(bar)) - \ + pci_resource_start((dev),(bar)) + 1)) + +/* Similar to the helpers above, these manipulate per-pci_dev + * driver-specific data. They are really just a wrapper around + * the generic device structure functions of these calls. + */ +static inline void *pci_get_drvdata (struct pci_dev *pdev) +{ + return dev_get_drvdata(&pdev->dev); +} + +static inline void pci_set_drvdata (struct pci_dev *pdev, void *data) +{ + dev_set_drvdata(&pdev->dev, data); +} + +/* If you want to know what to call your pci_dev, ask this function. + * Again, it's a wrapper around the generic device. + */ +static inline char *pci_name(struct pci_dev *pdev) +{ + return pdev->dev.bus_id; +} + + +/* Some archs don't want to expose struct resource to userland as-is + * in sysfs and /proc + */ +#ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER +static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, + const struct resource *rsrc, resource_size_t *start, + resource_size_t *end) +{ + *start = rsrc->start; + *end = rsrc->end; +} +#endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ + + +/* + * The world is not perfect and supplies us with broken PCI devices. + * For at least a part of these bugs we need a work-around, so both + * generic (drivers/pci/quirks.c) and per-architecture code can define + * fixup hooks to be called for particular buggy devices. + */ + +struct pci_fixup { + u16 vendor, device; /* You can use PCI_ANY_ID here of course */ + void (*hook)(struct pci_dev *dev); +}; + +enum pci_fixup_pass { + pci_fixup_early, /* Before probing BARs */ + pci_fixup_header, /* After reading configuration header */ + pci_fixup_final, /* Final phase of device fixups */ + pci_fixup_enable, /* pci_enable_device() time */ +}; + +/* Anonymous variables would be nice... 
*/ +#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \ + static const struct pci_fixup __pci_fixup_##name __attribute_used__ \ + __attribute__((__section__(#section))) = { vendor, device, hook }; +#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ + vendor##device##hook, vendor, device, hook) +#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ + vendor##device##hook, vendor, device, hook) +#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ + vendor##device##hook, vendor, device, hook) +#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ + vendor##device##hook, vendor, device, hook) + + +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); + +extern int pci_pci_problems; +#define PCIPCI_FAIL 1 /* No PCI PCI DMA */ +#define PCIPCI_TRITON 2 +#define PCIPCI_NATOMA 4 +#define PCIPCI_VIAETBF 8 +#define PCIPCI_VSFX 16 +#define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ +#define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ + +#endif /* __KERNEL__ */ +#endif /* LINUX_PCI_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/README.origin --- a/xen/include/asm-ia64/linux/README.origin Tue Dec 12 15:25:02 2006 -0700 +++ b/xen/include/asm-ia64/linux/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -24,3 +24,15 @@ timex.h -> linux/include/linux/timex.h timex.h -> linux/include/linux/timex.h topology.h -> linux/include/linux/topology.h wait.h -> linux/include/linux/wait.h + +# The files below are from Linux-2.6.19 +completion.h -> linux/include/linux/completion.h +ioport.h -> linux/include/linux/ioport.h +klist.h -> linux/include/linux/klist.h +kref.h -> linux/include/linux/kref.h +mod_devicetable.h -> linux/include/linux/mod_devicetable.h +pci_ids.h -> linux/include/linux/pci_ids.h +pci_regs.h -> linux/include/linux/pci_regs.h +pm.h -> linux/include/linux/pm.h +sysfs.h -> linux/include/linux/sysfs.h + diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/README.origin --- a/xen/include/asm-ia64/linux/asm/README.origin Tue Dec 12 15:25:02 2006 -0700 +++ b/xen/include/asm-ia64/linux/asm/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -41,3 +41,6 @@ unaligned.h -> linux/include/asm-ia64/u unaligned.h -> linux/include/asm-ia64/unaligned.h unistd.h -> linux/include/asm-ia64/unistd.h unwind.h -> linux/include/asm-ia64/unwind.h + +# The files below are from Linux-2.6.19 +machvec_init.h -> linux/include/asm-ia64/machvec_init.h diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/machvec_init.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/machvec_init.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,32 @@ +#include <asm/machvec.h> + +extern ia64_mv_send_ipi_t ia64_send_ipi; +extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge; +extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq; +extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem; +extern ia64_mv_pci_legacy_read_t ia64_pci_legacy_read; +extern ia64_mv_pci_legacy_write_t ia64_pci_legacy_write; + +extern ia64_mv_inb_t __ia64_inb; +extern ia64_mv_inw_t __ia64_inw; +extern ia64_mv_inl_t __ia64_inl; +extern ia64_mv_outb_t __ia64_outb; +extern ia64_mv_outw_t __ia64_outw; +extern ia64_mv_outl_t __ia64_outl; +extern ia64_mv_mmiowb_t __ia64_mmiowb; +extern ia64_mv_readb_t __ia64_readb; +extern ia64_mv_readw_t __ia64_readw; 
+extern ia64_mv_readl_t __ia64_readl; +extern ia64_mv_readq_t __ia64_readq; +extern ia64_mv_readb_t __ia64_readb_relaxed; +extern ia64_mv_readw_t __ia64_readw_relaxed; +extern ia64_mv_readl_t __ia64_readl_relaxed; +extern ia64_mv_readq_t __ia64_readq_relaxed; + +#define MACHVEC_HELPER(name) \ + struct ia64_machine_vector machvec_##name __attribute__ ((unused, __section__ (".machvec"))) \ + = MACHVEC_INIT(name); + +#define MACHVEC_DEFINE(name) MACHVEC_HELPER(name) + +MACHVEC_DEFINE(MACHVEC_PLATFORM_NAME) diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/README.origin --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/README.origin Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,23 @@ +# Source files in this directory are identical copies of linux-2.6.19 files: +# +# NOTE: DO NOT commit changes to these files! If a file +# needs to be changed, move it to ../linux-xen and follow +# the instructions in the README there. + +geo.h -> linux/include/asm-ia64/sn/geo.h +klconfig.h -> linux/include/asm-ia64/sn/klconfig.h +l1.h -> linux/include/asm-ia64/sn/l1.h +leds.h -> linux/include/asm-ia64/sn/leds.h +module.h -> linux/include/asm-ia64/sn/module.h +pcibus_provider_defs.h -> linux/include/asm-ia64/sn/pcibus_provider_defs.h +pcidev.h -> linux/include/asm-ia64/sn/pcidev.h +pda.h -> linux/include/asm-ia64/sn/pda.h +pic.h -> linux/include/asm-ia64/sn/pic.h +shub_mmr.h -> linux/include/asm-ia64/sn/shub_mmr.h +shubio.h -> linux/include/asm-ia64/sn/shubio.h +simulator.h -> linux/include/asm-ia64/sn/simulator.h +sn_cpuid.h -> linux/include/asm-ia64/sn/sn_cpuid.h +sn_feature_sets.h -> linux/include/asm-ia64/sn/sn_feature_sets.h +tiocp.h -> linux/include/asm-ia64/sn/tiocp.h +xbow.h -> linux/arch/ia64/sn/include/xtalk/xbow.h +xwidgetdev.h -> linux/arch/ia64/sn/include/xtalk/xwidgetdev.h diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/geo.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/geo.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,132 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_GEO_H +#define _ASM_IA64_SN_GEO_H + +/* The geoid_t implementation below is based loosely on the pcfg_t + implementation in sys/SN/promcfg.h. */ + +/* Type declaractions */ + +/* Size of a geoid_t structure (must be before decl. of geoid_u) */ +#define GEOID_SIZE 8 /* Would 16 be better? The size can + be different on different platforms. */ + +#define MAX_SLOTS 0xf /* slots per module */ +#define MAX_SLABS 0xf /* slabs per slot */ + +typedef unsigned char geo_type_t; + +/* Fields common to all substructures */ +typedef struct geo_common_s { + moduleid_t module; /* The module (box) this h/w lives in */ + geo_type_t type; /* What type of h/w is named by this geoid_t */ + slabid_t slab:4; /* slab (ASIC), 0 .. 15 within slot */ + slotid_t slot:4; /* slot (Blade), 0 .. 
15 within module */ +} geo_common_t; + +/* Additional fields for particular types of hardware */ +typedef struct geo_node_s { + geo_common_t common; /* No additional fields needed */ +} geo_node_t; + +typedef struct geo_rtr_s { + geo_common_t common; /* No additional fields needed */ +} geo_rtr_t; + +typedef struct geo_iocntl_s { + geo_common_t common; /* No additional fields needed */ +} geo_iocntl_t; + +typedef struct geo_pcicard_s { + geo_iocntl_t common; + char bus; /* Bus/widget number */ + char slot; /* PCI slot number */ +} geo_pcicard_t; + +/* Subcomponents of a node */ +typedef struct geo_cpu_s { + geo_node_t node; + char slice; /* Which CPU on the node */ +} geo_cpu_t; + +typedef struct geo_mem_s { + geo_node_t node; + char membus; /* The memory bus on the node */ + char memslot; /* The memory slot on the bus */ +} geo_mem_t; + + +typedef union geoid_u { + geo_common_t common; + geo_node_t node; + geo_iocntl_t iocntl; + geo_pcicard_t pcicard; + geo_rtr_t rtr; + geo_cpu_t cpu; + geo_mem_t mem; + char padsize[GEOID_SIZE]; +} geoid_t; + + +/* Preprocessor macros */ + +#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad: + module/001c07/slab/5/node/memory/2/slot/4 */ + +/* Values for geo_type_t */ +#define GEO_TYPE_INVALID 0 +#define GEO_TYPE_MODULE 1 +#define GEO_TYPE_NODE 2 +#define GEO_TYPE_RTR 3 +#define GEO_TYPE_IOCNTL 4 +#define GEO_TYPE_IOCARD 5 +#define GEO_TYPE_CPU 6 +#define GEO_TYPE_MEM 7 +#define GEO_TYPE_MAX (GEO_TYPE_MEM+1) + +/* Parameter for hwcfg_format_geoid_compt() */ +#define GEO_COMPT_MODULE 1 +#define GEO_COMPT_SLAB 2 +#define GEO_COMPT_IOBUS 3 +#define GEO_COMPT_IOSLOT 4 +#define GEO_COMPT_CPU 5 +#define GEO_COMPT_MEMBUS 6 +#define GEO_COMPT_MEMSLOT 7 + +#define GEO_INVALID_STR "<invalid>" + +#define INVALID_NASID ((nasid_t)-1) +#define INVALID_CNODEID ((cnodeid_t)-1) +#define INVALID_PNODEID ((pnodeid_t)-1) +#define INVALID_SLAB (slabid_t)-1 +#define INVALID_SLOT (slotid_t)-1 +#define INVALID_MODULE ((moduleid_t)-1) + +static inline slabid_t geo_slab(geoid_t g) +{ + return (g.common.type == GEO_TYPE_INVALID) ? + INVALID_SLAB : g.common.slab; +} + +static inline slotid_t geo_slot(geoid_t g) +{ + return (g.common.type == GEO_TYPE_INVALID) ? + INVALID_SLOT : g.common.slot; +} + +static inline moduleid_t geo_module(geoid_t g) +{ + return (g.common.type == GEO_TYPE_INVALID) ? + INVALID_MODULE : g.common.module; +} + +extern geoid_t cnodeid_get_geoid(cnodeid_t cnode); + +#endif /* _ASM_IA64_SN_GEO_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/klconfig.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/klconfig.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,246 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Derived from IRIX <sys/SN/klconfig.h>. + * + * Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (C) 1999 by Ralf Baechle + */ +#ifndef _ASM_IA64_SN_KLCONFIG_H +#define _ASM_IA64_SN_KLCONFIG_H + +/* + * The KLCONFIG structures store info about the various BOARDs found + * during Hardware Discovery. In addition, it stores info about the + * components found on the BOARDs. 
+ */ + +typedef s32 klconf_off_t; + + +/* Functions/macros needed to use this structure */ + +typedef struct kl_config_hdr { + char pad[20]; + klconf_off_t ch_board_info; /* the link list of boards */ + char pad0[88]; +} kl_config_hdr_t; + + +#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(GLOBAL_CAC_ADDR((nasid), (off))) + +/* + * The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD + * can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to + * the LOCAL/current NODE. REMOTE means it is attached to a different + * node.(TBD - Need a way to treat ROUTER boards.) + * + * There are 2 different structures to represent these boards - + * lboard - Local board, rboard - remote board. These 2 structures + * can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer + * Figure below). The first byte of the rboard or lboard structure + * is used to find out its type - no unions are used. + * If it is a lboard, then the config info of this board will be found + * on the local node. (LOCAL NODE BASE + offset value gives pointer to + * the structure. + * If it is a rboard, the local structure contains the node number + * and the offset of the beginning of the LINKED LIST on the remote node. + * The details of the hardware on a remote node can be built locally, + * if required, by reading the LINKED LIST on the remote node and + * ignoring all the rboards on that node. + * + * The local node uses the REMOTE NODE NUMBER + OFFSET to point to the + * First board info on the remote node. The remote node list is + * traversed as the local list, using the REMOTE BASE ADDRESS and not + * the local base address and ignoring all rboard values. + * + * + KLCONFIG + + +------------+ +------------+ +------------+ +------------+ + | lboard | +-->| lboard | +-->| rboard | +-->| lboard | + +------------+ | +------------+ | +------------+ | +------------+ + | board info | | | board info | | |errinfo,bptr| | | board info | + +------------+ | +------------+ | +------------+ | +------------+ + | offset |--+ | offset |--+ | offset |--+ |offset=NULL | + +------------+ +------------+ +------------+ +------------+ + + + +------------+ + | board info | + +------------+ +--------------------------------+ + | compt 1 |------>| type, rev, diaginfo, size ... | (CPU) + +------------+ +--------------------------------+ + | compt 2 |--+ + +------------+ | +--------------------------------+ + | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK) + +------------+ +--------------------------------+ + | errinfo |--+ + +------------+ | +--------------------------------+ + +--->|r/l brd errinfo,compt err flags | + +--------------------------------+ + + * + * Each BOARD consists of COMPONENTs and the BOARD structure has + * pointers (offsets) to its COMPONENT structure. + * The COMPONENT structure has version info, size and speed info, revision, + * error info and the NIC info. This structure can accommodate any + * BOARD with arbitrary COMPONENT composition. + * + * The ERRORINFO part of each BOARD has error information + * that describes errors about the BOARD itself. It also has flags to + * indicate the COMPONENT(s) on the board that have errors. The error + * information specific to the COMPONENT is present in the respective + * COMPONENT structure. + * + * The ERRORINFO structure is also treated like a COMPONENT, ie. the + * BOARD has pointers(offset) to the ERRORINFO structure. The rboard + * structure also has a pointer to the ERRORINFO structure. 
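In short, the traversal described above follows brd_next_any offsets, converting each offset into a pointer against the correct node base. A simplified local-node sketch, assuming the find_lboard_next() helper defined near the end of this header and a starting board pointer (first_brd) obtained elsewhere from the per-node kl_config_hdr:

    /* Count the boards reachable from first_brd on the local node. */
    static int example_count_boards(lboard_t *first_brd)
    {
            lboard_t *brd;
            int n = 0;

            for (brd = first_brd; brd != NULL; brd = find_lboard_next(brd))
                    n++;            /* each step follows brd->brd_next_any */

            return n;
    }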
This is + * the place to store ERRORINFO about a REMOTE NODE, if the HUB on + * that NODE is not working or if the REMOTE MEMORY is BAD. In cases where + * only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can + * be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info + * which is present on the REMOTE NODE.(TBD) + * REMOTE ERRINFO can be stored on any of the nearest nodes + * or on all the nearest nodes.(TBD) + * Like BOARD structures, REMOTE ERRINFO structures can be built locally + * using the rboard errinfo pointer. + * + * In order to get useful information from this Data organization, a set of + * interface routines are provided (TBD). The important thing to remember while + * manipulating the structures, is that, the NODE number information should + * be used. If the NODE is non-zero (remote) then each offset should + * be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR. + * This includes offsets for BOARDS, COMPONENTS and ERRORINFO. + * + * Note that these structures do not provide much info about connectivity. + * That info will be part of HWGRAPH, which is an extension of the cfg_t + * data structure. (ref IP27prom/cfg.h) It has to be extended to include + * the IO part of the Network(TBD). + * + * The data structures below define the above concepts. + */ + + +/* + * BOARD classes + */ + +#define KLCLASS_MASK 0xf0 +#define KLCLASS_NONE 0x00 +#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */ +#define KLCLASS_CPU KLCLASS_NODE +#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI + and the non-graphics widget boards */ +#define KLCLASS_ROUTER 0x30 /* Router board */ +#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board + so that we can record error info */ +#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */ +#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */ + +#define KLCLASS(_x) ((_x) & KLCLASS_MASK) + + +/* + * board types + */ + +#define KLTYPE_MASK 0x0f +#define KLTYPE(_x) ((_x) & KLTYPE_MASK) + +#define KLTYPE_SNIA (KLCLASS_CPU | 0x1) +#define KLTYPE_TIO (KLCLASS_CPU | 0x2) + +#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1) +#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3) +#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4) + +#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2) + +#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0) +#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4) +#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6) +#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7) +#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8) +#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9) +#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa) +#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb) +#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc) +#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd) + + +/* + * board structures + */ + +#define MAX_COMPTS_PER_BRD 24 + +typedef struct lboard_s { + klconf_off_t brd_next_any; /* Next BOARD */ + unsigned char struct_type; /* type of structure, local or remote */ + unsigned char brd_type; /* type+class */ + unsigned char brd_sversion; /* version of this structure */ + unsigned char brd_brevision; /* board revision */ + unsigned char brd_promver; /* board prom version, if any */ + unsigned char brd_flags; /* Enabled, Disabled etc */ + unsigned char brd_slot; /* slot number */ + unsigned short brd_debugsw; /* Debug switches */ + geoid_t brd_geoid; /* geo id */ + partid_t brd_partition; /* Partition number */ + unsigned short brd_diagval; /* diagnostic value */ + unsigned short 
brd_diagparm; /* diagnostic parameter */ + unsigned char brd_inventory; /* inventory history */ + unsigned char brd_numcompts; /* Number of components */ + nic_t brd_nic; /* Number in CAN */ + nasid_t brd_nasid; /* passed parameter */ + klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */ + klconf_off_t brd_errinfo; /* Board's error information */ + struct lboard_s *brd_parent; /* Logical parent for this brd */ + char pad0[4]; + unsigned char brd_confidence; /* confidence that the board is bad */ + nasid_t brd_owner; /* who owns this board */ + unsigned char brd_nic_flags; /* To handle 8 more NICs */ + char pad1[24]; /* future expansion */ + char brd_name[32]; + nasid_t brd_next_same_host; /* host of next brd w/same nasid */ + klconf_off_t brd_next_same; /* Next BOARD with same nasid */ +} lboard_t; + +/* + * Generic info structure. This stores common info about a + * component. + */ + +typedef struct klinfo_s { /* Generic info */ + unsigned char struct_type; /* type of this structure */ + unsigned char struct_version; /* version of this structure */ + unsigned char flags; /* Enabled, disabled etc */ + unsigned char revision; /* component revision */ + unsigned short diagval; /* result of diagnostics */ + unsigned short diagparm; /* diagnostic parameter */ + unsigned char inventory; /* previous inventory status */ + unsigned short partid; /* widget part number */ + nic_t nic; /* MUst be aligned properly */ + unsigned char physid; /* physical id of component */ + unsigned int virtid; /* virtual id as seen by system */ + unsigned char widid; /* Widget id - if applicable */ + nasid_t nasid; /* node number - from parent */ + char pad1; /* pad out structure. */ + char pad2; /* pad out structure. */ + void *data; + klconf_off_t errinfo; /* component specific errors */ + unsigned short pad3; /* pci fields have moved over to */ + unsigned short pad4; /* klbri_t */ +} klinfo_t ; + + +static inline lboard_t *find_lboard_next(lboard_t * brd) +{ + if (brd && brd->brd_next_any) + return NODE_OFFSET_TO_LBOARD(NASID_GET(brd), brd->brd_next_any); + return NULL; +} + +#endif /* _ASM_IA64_SN_KLCONFIG_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/l1.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/l1.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,51 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved. 
+ */ + +#ifndef _ASM_IA64_SN_L1_H +#define _ASM_IA64_SN_L1_H + +/* brick type response codes */ +#define L1_BRICKTYPE_PX 0x23 /* # */ +#define L1_BRICKTYPE_PE 0x25 /* % */ +#define L1_BRICKTYPE_N_p0 0x26 /* & */ +#define L1_BRICKTYPE_IP45 0x34 /* 4 */ +#define L1_BRICKTYPE_IP41 0x35 /* 5 */ +#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */ +#define L1_BRICKTYPE_IX 0x3d /* = */ +#define L1_BRICKTYPE_IP34 0x61 /* a */ +#define L1_BRICKTYPE_GA 0x62 /* b */ +#define L1_BRICKTYPE_C 0x63 /* c */ +#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */ +#define L1_BRICKTYPE_I 0x69 /* i */ +#define L1_BRICKTYPE_N 0x6e /* n */ +#define L1_BRICKTYPE_OPUS 0x6f /* o */ +#define L1_BRICKTYPE_P 0x70 /* p */ +#define L1_BRICKTYPE_R 0x72 /* r */ +#define L1_BRICKTYPE_CHI_CG 0x76 /* v */ +#define L1_BRICKTYPE_X 0x78 /* x */ +#define L1_BRICKTYPE_X2 0x79 /* y */ +#define L1_BRICKTYPE_SA 0x5e /* ^ */ +#define L1_BRICKTYPE_PA 0x6a /* j */ +#define L1_BRICKTYPE_IA 0x6b /* k */ +#define L1_BRICKTYPE_ATHENA 0x2b /* + */ +#define L1_BRICKTYPE_DAYTONA 0x7a /* z */ +#define L1_BRICKTYPE_1932 0x2c /* . */ +#define L1_BRICKTYPE_191010 0x2e /* , */ + +/* board type response codes */ +#define L1_BOARDTYPE_IP69 0x0100 /* CA */ +#define L1_BOARDTYPE_IP63 0x0200 /* CB */ +#define L1_BOARDTYPE_BASEIO 0x0300 /* IB */ +#define L1_BOARDTYPE_PCIE2SLOT 0x0400 /* IC */ +#define L1_BOARDTYPE_PCIX3SLOT 0x0500 /* ID */ +#define L1_BOARDTYPE_PCIXPCIE4SLOT 0x0600 /* IE */ +#define L1_BOARDTYPE_ABACUS 0x0700 /* AB */ +#define L1_BOARDTYPE_DAYTONA 0x0800 /* AD */ +#define L1_BOARDTYPE_INVAL (-1) /* invalid brick type */ + +#endif /* _ASM_IA64_SN_L1_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/leds.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/leds.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,33 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_LEDS_H +#define _ASM_IA64_SN_LEDS_H + +#include <asm/sn/addrs.h> +#include <asm/sn/pda.h> +#include <asm/sn/shub_mmr.h> + +#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0)) +#define LED_CPU_SHIFT 16 + +#define LED_CPU_HEARTBEAT 0x01 +#define LED_CPU_ACTIVITY 0x02 +#define LED_ALWAYS_SET 0x00 + +/* + * Basic macros for flashing the LEDS on an SGI SN. + */ + +static __inline__ void +set_led_bits(u8 value, u8 mask) +{ + pda->led_state = (pda->led_state & ~mask) | (value & mask); + *pda->led_address = (short) pda->led_state; +} + +#endif /* _ASM_IA64_SN_LEDS_H */ + diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/module.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/module.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,127 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved. 
+ */ +#ifndef _ASM_IA64_SN_MODULE_H +#define _ASM_IA64_SN_MODULE_H + +/* parameter for format_module_id() */ +#define MODULE_FORMAT_BRIEF 1 +#define MODULE_FORMAT_LONG 2 +#define MODULE_FORMAT_LCD 3 + +/* + * Module id format + * + * 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int) + * 15-8 Brick type (8-bit ascii character) + * 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int) + * + */ + +/* + * Macros for getting the brick type + */ +#define MODULE_BTYPE_MASK 0xff00 +#define MODULE_BTYPE_SHFT 8 +#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT) +#define MODULE_BT_TO_CHAR(_b) ((char)(_b)) +#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m))) + +/* + * Macros for getting the rack ID. + */ +#define MODULE_RACK_MASK 0xffff0000 +#define MODULE_RACK_SHFT 16 +#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT) + +/* + * Macros for getting the brick position + */ +#define MODULE_BPOS_MASK 0x00ff +#define MODULE_BPOS_SHFT 0 +#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT) + +/* + * Macros for encoding and decoding rack IDs + * A rack number consists of three parts: + * class (0==CPU/mixed, 1==I/O), group, number + * + * Rack number is stored just as it is displayed on the screen: + * a 3-decimal-digit number. + */ +#define RACK_CLASS_DVDR 100 +#define RACK_GROUP_DVDR 10 +#define RACK_NUM_DVDR 1 + +#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \ + (_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR) + +#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR) +#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \ + RACK_CLASS_DVDR) / RACK_GROUP_DVDR) +#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \ + RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \ + RACK_GROUP_DVDR) / RACK_NUM_DVDR) + +/* + * Macros for encoding and decoding rack IDs + * A rack number consists of three parts: + * class 1 bit, 0==CPU/mixed, 1==I/O + * group 2 bits for CPU/mixed, 3 bits for I/O + * number 3 bits for CPU/mixed, 2 bits for I/O (1 based) + */ +#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2) +#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 
2 : 3) + +#define RACK_CLASS_MASK(_r) 0x20 +#define RACK_CLASS_SHFT(_r) 5 +#define RACK_ADD_CLASS(_r, _c) \ + ((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r)) + +#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r) +#define RACK_GROUP_MASK(_r) \ + ( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) ) +#define RACK_ADD_GROUP(_r, _g) \ + ((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r)) + +#define RACK_NUM_SHFT(_r) 0 +#define RACK_NUM_MASK(_r) \ + ( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) ) +#define RACK_ADD_NUM(_r, _n) \ + ((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r)) + + +/* + * Brick type definitions + */ +#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */ + +extern char brick_types[]; + +#define MODULE_CBRICK 0 +#define MODULE_RBRICK 1 +#define MODULE_IBRICK 2 +#define MODULE_KBRICK 3 +#define MODULE_XBRICK 4 +#define MODULE_DBRICK 5 +#define MODULE_PBRICK 6 +#define MODULE_NBRICK 7 +#define MODULE_PEBRICK 8 +#define MODULE_PXBRICK 9 +#define MODULE_IXBRICK 10 +#define MODULE_CGBRICK 11 +#define MODULE_OPUSBRICK 12 +#define MODULE_SABRICK 13 /* TIO BringUp Brick */ +#define MODULE_IABRICK 14 +#define MODULE_PABRICK 15 +#define MODULE_GABRICK 16 +#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */ + +extern char brick_types[]; +extern void format_module_id(char *, moduleid_t, int); + +#endif /* _ASM_IA64_SN_MODULE_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/pcibus_provider_defs.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,68 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H +#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H + +/* + * SN pci asic types. Do not ever renumber these or reuse values. The + * values must agree with what prom thinks they are. + */ + +#define PCIIO_ASIC_TYPE_UNKNOWN 0 +#define PCIIO_ASIC_TYPE_PPB 1 +#define PCIIO_ASIC_TYPE_PIC 2 +#define PCIIO_ASIC_TYPE_TIOCP 3 +#define PCIIO_ASIC_TYPE_TIOCA 4 +#define PCIIO_ASIC_TYPE_TIOCE 5 + +#define PCIIO_ASIC_MAX_TYPES 6 + +/* + * Common pciio bus provider data. There should be one of these as the + * first field in any pciio based provider soft structure (e.g. pcibr_soft + * tioca_soft, etc). 
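Returning briefly to the module-ID macros shown above, a worked example makes the 32-bit layout easier to check: rack 001 holding a 'c' brick in bay 07 (the "001c07" form seen on the LCD) packs as (1 << MODULE_RACK_SHFT) | ('c' << MODULE_BTYPE_SHFT) | 7, and the GET macros recover each field. The function name below is invented for illustration:

    static void example_moduleid_decode(void)
    {
            moduleid_t m = (1 << MODULE_RACK_SHFT) |
                           ('c' << MODULE_BTYPE_SHFT) | 7;

            BUG_ON(MODULE_GET_RACK(m)   != 1);      /* rack number, bits 31-16 */
            BUG_ON(MODULE_GET_BTCHAR(m) != 'c');    /* brick type,  bits 15-8  */
            BUG_ON(MODULE_GET_BPOS(m)   != 7);      /* bay in rack, bits 7-0   */
    }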
+ */ + +struct pcibus_bussoft { + u32 bs_asic_type; /* chipset type */ + u32 bs_xid; /* xwidget id */ + u32 bs_persist_busnum; /* Persistent Bus Number */ + u32 bs_persist_segment; /* Segment Number */ + u64 bs_legacy_io; /* legacy io pio addr */ + u64 bs_legacy_mem; /* legacy mem pio addr */ + u64 bs_base; /* widget base */ + struct xwidget_info *bs_xwidget_info; +}; + +struct pci_controller; +/* + * SN pci bus indirection + */ + +struct sn_pcibus_provider { + dma_addr_t (*dma_map)(struct pci_dev *, unsigned long, size_t, int flags); + dma_addr_t (*dma_map_consistent)(struct pci_dev *, unsigned long, size_t, int flags); + void (*dma_unmap)(struct pci_dev *, dma_addr_t, int); + void * (*bus_fixup)(struct pcibus_bussoft *, struct pci_controller *); + void (*force_interrupt)(struct sn_irq_info *); + void (*target_interrupt)(struct sn_irq_info *); +}; + +/* + * Flags used by the map interfaces + * bits 3:0 specifies format of passed in address + * bit 4 specifies that address is to be used for MSI + */ + +#define SN_DMA_ADDRTYPE(x) ((x) & 0xf) +#define SN_DMA_ADDR_PHYS 1 /* address is an xio address. */ +#define SN_DMA_ADDR_XIO 2 /* address is phys memory */ +#define SN_DMA_MSI 0x10 /* Bus address is to be used for MSI */ + +extern struct sn_pcibus_provider *sn_pci_provider[]; +#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/pcidev.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/pcidev.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,83 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_PCI_PCIDEV_H +#define _ASM_IA64_SN_PCI_PCIDEV_H + +#include <linux/pci.h> + +/* + * In ia64, pci_dev->sysdata must be a *pci_controller. To provide access to + * the pcidev_info structs for all devices under a controller, we extend the + * definition of pci_controller, via sn_pci_controller, to include a list + * of pcidev_info. + */ +struct sn_pci_controller { + struct pci_controller pci_controller; + struct list_head pcidev_info; +}; + +#define SN_PCI_CONTROLLER(dev) ((struct sn_pci_controller *) dev->sysdata) + +#define SN_PCIDEV_INFO(dev) sn_pcidev_info_get(dev) + +#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \ + (struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) +/* + * Given a pci_bus, return the sn pcibus_bussoft struct. Note that + * this only works for root busses, not for busses represented by PPB's. + */ + +#define SN_PCIBUS_BUSSOFT(pci_bus) \ + ((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) + +#define SN_PCIBUS_BUSSOFT_INFO(pci_bus) \ + (struct pcibus_info *)((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data)) +/* + * Given a struct pci_dev, return the sn pcibus_bussoft struct. Note + * that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due + * due to possible PPB's in the path. 
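The flag encoding above means callers build an int with an address-type code in the low nibble and, when needed, OR in SN_DMA_MSI before handing the address to one of the provider dma_map hooks. A hedged sketch, using the SN_PCIDEV_BUSPROVIDER() accessor defined just below; the function name and parameters are invented:

    static dma_addr_t example_map_for_msi(struct pci_dev *pdev,
                                          unsigned long phys, size_t size)
    {
            struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
            int flags = SN_DMA_ADDR_PHYS | SN_DMA_MSI;  /* low nibble + MSI bit */

            return provider->dma_map(pdev, phys, size, flags);
    }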
+ */ + +#define SN_PCIDEV_BUSSOFT(pci_dev) \ + (SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info) + +#define SN_PCIDEV_BUSPROVIDER(pci_dev) \ + (SN_PCIDEV_INFO(pci_dev)->pdi_provider) + +#define PCIIO_BUS_NONE 255 /* bus 255 reserved */ +#define PCIIO_SLOT_NONE 255 +#define PCIIO_FUNC_NONE 255 +#define PCIIO_VENDOR_ID_NONE (-1) + +struct pcidev_info { + u64 pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */ + u64 pdi_slot_host_handle; /* Bus and devfn Host pci_dev */ + + struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */ + struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */ + struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */ + + struct sn_irq_info *pdi_sn_irq_info; + struct sn_pcibus_provider *pdi_provider; /* sn pci ops */ + struct pci_dev *host_pci_dev; /* host bus link */ + struct list_head pdi_list; /* List of pcidev_info */ +}; + +extern void sn_irq_fixup(struct pci_dev *pci_dev, + struct sn_irq_info *sn_irq_info); +extern void sn_irq_unfixup(struct pci_dev *pci_dev); +extern struct pcidev_info * sn_pcidev_info_get(struct pci_dev *); +extern void sn_pci_controller_fixup(int segment, int busnum, + struct pci_bus *bus); +extern void sn_bus_store_sysdata(struct pci_dev *dev); +extern void sn_bus_free_sysdata(void); +extern void sn_generate_path(struct pci_bus *pci_bus, char *address); +extern void sn_pci_fixup_slot(struct pci_dev *dev); +extern void sn_pci_unfixup_slot(struct pci_dev *dev); +extern void sn_irq_lh_init(void); +#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/pda.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/pda.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,69 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_PDA_H +#define _ASM_IA64_SN_PDA_H + +#include <linux/cache.h> +#include <asm/percpu.h> +#include <asm/system.h> + + +/* + * CPU-specific data structure. + * + * One of these structures is allocated for each cpu of a NUMA system. + * + * This structure provides a convenient way of keeping together + * all SN per-cpu data structures. + */ + +typedef struct pda_s { + + /* + * Support for SN LEDs + */ + volatile short *led_address; + u8 led_state; + u8 hb_state; /* supports blinking heartbeat leds */ + unsigned int hb_count; + + unsigned int idle_flag; + + volatile unsigned long *bedrock_rev_id; + volatile unsigned long *pio_write_status_addr; + unsigned long pio_write_status_val; + volatile unsigned long *pio_shub_war_cam_addr; + + unsigned long sn_in_service_ivecs[4]; + int sn_lb_int_war_ticks; + int sn_last_irq; + int sn_first_irq; +} pda_t; + + +#define CACHE_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) + +/* + * PDA + * Per-cpu private data area for each cpu. The PDA is located immediately after + * the IA64 cpu_data area. A full page is allocated for the cp_data area for each + * cpu but only a small amout of the page is actually used. We put the SNIA PDA + * in the same page as the cpu_data area. Note that there is a check in the setup + * code to verify that we don't overflow the page. + * + * Seems like we should should cache-line align the pda so that any changes in the + * size of the cpu_data area don't change cache layout. 
Should we align to 32, 64, 128 + * or 512 boundary. Each has merits. For now, pick 128 but should be revisited later. + */ +DECLARE_PER_CPU(struct pda_s, pda_percpu); + +#define pda (&__ia64_per_cpu_var(pda_percpu)) + +#define pdacpu(cpu) (&per_cpu(pda_percpu, cpu)) + +#endif /* _ASM_IA64_SN_PDA_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/pic.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/pic.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,261 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved. + */ +#ifndef _ASM_IA64_SN_PCI_PIC_H +#define _ASM_IA64_SN_PCI_PIC_H + +/* + * PIC AS DEVICE ZERO + * ------------------ + * + * PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC) + * be designated as 'device 0'. That is a departure from earlier SGI + * PCI bridges. Because of that we use config space 1 to access the + * config space of the first actual PCI device on the bus. + * Here's what the PIC manual says: + * + * The current PCI-X bus specification now defines that the parent + * hosts bus bridge (PIC for example) must be device 0 on bus 0. PIC + * reduced the total number of devices from 8 to 4 and removed the + * device registers and windows, now only supporting devices 0,1,2, and + * 3. PIC did leave all 8 configuration space windows. The reason was + * there was nothing to gain by removing them. Here in lies the problem. + * The device numbering we do using 0 through 3 is unrelated to the device + * numbering which PCI-X requires in configuration space. In the past we + * correlated Configs pace and our device space 0 <-> 0, 1 <-> 1, etc. + * PCI-X requires we start a 1, not 0 and currently the PX brick + * does associate our: + * + * device 0 with configuration space window 1, + * device 1 with configuration space window 2, + * device 2 with configuration space window 3, + * device 3 with configuration space window 4. + * + * The net effect is that all config space access are off-by-one with + * relation to other per-slot accesses on the PIC. + * Here is a table that shows some of that: + * + * Internal Slot# + * | + * | 0 1 2 3 + * ----------|--------------------------------------- + * config | 0x21000 0x22000 0x23000 0x24000 + * | + * even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd + * | + * odd rrb | n/a 0[1] n/a 1[1] + * | + * int dev | 00 01 10 11 + * | + * ext slot# | 1 2 3 4 + * ----------|--------------------------------------- + */ + +#define PIC_ATE_TARGETID_SHFT 8 +#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFFUL +#define PIC_PCI64_ATTR_TARG_SHFT 60 + + +/***************************************************************************** + *********************** PIC MMR structure mapping *************************** + *****************************************************************************/ + +/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0] + * of a 64-bit register. When writing PIC registers, always write the + * entire 64 bits. 
+ */ + +struct pic { + + /* 0x000000-0x00FFFF -- Local Registers */ + + /* 0x000000-0x000057 -- Standard Widget Configuration */ + u64 p_wid_id; /* 0x000000 */ + u64 p_wid_stat; /* 0x000008 */ + u64 p_wid_err_upper; /* 0x000010 */ + u64 p_wid_err_lower; /* 0x000018 */ + #define p_wid_err p_wid_err_lower + u64 p_wid_control; /* 0x000020 */ + u64 p_wid_req_timeout; /* 0x000028 */ + u64 p_wid_int_upper; /* 0x000030 */ + u64 p_wid_int_lower; /* 0x000038 */ + #define p_wid_int p_wid_int_lower + u64 p_wid_err_cmdword; /* 0x000040 */ + u64 p_wid_llp; /* 0x000048 */ + u64 p_wid_tflush; /* 0x000050 */ + + /* 0x000058-0x00007F -- Bridge-specific Widget Configuration */ + u64 p_wid_aux_err; /* 0x000058 */ + u64 p_wid_resp_upper; /* 0x000060 */ + u64 p_wid_resp_lower; /* 0x000068 */ + #define p_wid_resp p_wid_resp_lower + u64 p_wid_tst_pin_ctrl; /* 0x000070 */ + u64 p_wid_addr_lkerr; /* 0x000078 */ + + /* 0x000080-0x00008F -- PMU & MAP */ + u64 p_dir_map; /* 0x000080 */ + u64 _pad_000088; /* 0x000088 */ + + /* 0x000090-0x00009F -- SSRAM */ + u64 p_map_fault; /* 0x000090 */ + u64 _pad_000098; /* 0x000098 */ + + /* 0x0000A0-0x0000AF -- Arbitration */ + u64 p_arb; /* 0x0000A0 */ + u64 _pad_0000A8; /* 0x0000A8 */ + + /* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */ + u64 p_ate_parity_err; /* 0x0000B0 */ + u64 _pad_0000B8; /* 0x0000B8 */ + + /* 0x0000C0-0x0000FF -- PCI/GIO */ + u64 p_bus_timeout; /* 0x0000C0 */ + u64 p_pci_cfg; /* 0x0000C8 */ + u64 p_pci_err_upper; /* 0x0000D0 */ + u64 p_pci_err_lower; /* 0x0000D8 */ + #define p_pci_err p_pci_err_lower + u64 _pad_0000E0[4]; /* 0x0000{E0..F8} */ + + /* 0x000100-0x0001FF -- Interrupt */ + u64 p_int_status; /* 0x000100 */ + u64 p_int_enable; /* 0x000108 */ + u64 p_int_rst_stat; /* 0x000110 */ + u64 p_int_mode; /* 0x000118 */ + u64 p_int_device; /* 0x000120 */ + u64 p_int_host_err; /* 0x000128 */ + u64 p_int_addr[8]; /* 0x0001{30,,,68} */ + u64 p_err_int_view; /* 0x000170 */ + u64 p_mult_int; /* 0x000178 */ + u64 p_force_always[8]; /* 0x0001{80,,,B8} */ + u64 p_force_pin[8]; /* 0x0001{C0,,,F8} */ + + /* 0x000200-0x000298 -- Device */ + u64 p_device[4]; /* 0x0002{00,,,18} */ + u64 _pad_000220[4]; /* 0x0002{20,,,38} */ + u64 p_wr_req_buf[4]; /* 0x0002{40,,,58} */ + u64 _pad_000260[4]; /* 0x0002{60,,,78} */ + u64 p_rrb_map[2]; /* 0x0002{80,,,88} */ + #define p_even_resp p_rrb_map[0] /* 0x000280 */ + #define p_odd_resp p_rrb_map[1] /* 0x000288 */ + u64 p_resp_status; /* 0x000290 */ + u64 p_resp_clear; /* 0x000298 */ + + u64 _pad_0002A0[12]; /* 0x0002{A0..F8} */ + + /* 0x000300-0x0003F8 -- Buffer Address Match Registers */ + struct { + u64 upper; /* 0x0003{00,,,F0} */ + u64 lower; /* 0x0003{08,,,F8} */ + } p_buf_addr_match[16]; + + /* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */ + struct { + u64 flush_w_touch; /* 0x000{400,,,5C0} */ + u64 flush_wo_touch; /* 0x000{408,,,5C8} */ + u64 inflight; /* 0x000{410,,,5D0} */ + u64 prefetch; /* 0x000{418,,,5D8} */ + u64 total_pci_retry; /* 0x000{420,,,5E0} */ + u64 max_pci_retry; /* 0x000{428,,,5E8} */ + u64 max_latency; /* 0x000{430,,,5F0} */ + u64 clear_all; /* 0x000{438,,,5F8} */ + } p_buf_count[8]; + + + /* 0x000600-0x0009FF -- PCI/X registers */ + u64 p_pcix_bus_err_addr; /* 0x000600 */ + u64 p_pcix_bus_err_attr; /* 0x000608 */ + u64 p_pcix_bus_err_data; /* 0x000610 */ + u64 p_pcix_pio_split_addr; /* 0x000618 */ + u64 p_pcix_pio_split_attr; /* 0x000620 */ + u64 p_pcix_dma_req_err_attr; /* 0x000628 */ + u64 p_pcix_dma_req_err_addr; /* 0x000630 */ + u64 p_pcix_timeout; /* 0x000638 */ + + 
u64 _pad_000640[120]; /* 0x000{640,,,9F8} */ + + /* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */ + struct { + u64 p_buf_addr; /* 0x000{A00,,,AF0} */ + u64 p_buf_attr; /* 0X000{A08,,,AF8} */ + } p_pcix_read_buf_64[16]; + + struct { + u64 p_buf_addr; /* 0x000{B00,,,BE0} */ + u64 p_buf_attr; /* 0x000{B08,,,BE8} */ + u64 p_buf_valid; /* 0x000{B10,,,BF0} */ + u64 __pad1; /* 0x000{B18,,,BF8} */ + } p_pcix_write_buf_64[8]; + + /* End of Local Registers -- Start of Address Map space */ + + char _pad_000c00[0x010000 - 0x000c00]; + + /* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */ + u64 p_int_ate_ram[1024]; /* 0x010000-0x011fff */ + + /* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */ + u64 p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */ + + char _pad_014000[0x18000 - 0x014000]; + + /* 0x18000-0x197F8 -- PIC Write Request Ram */ + u64 p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */ + u64 p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */ + u64 p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */ + + char _pad_019800[0x20000 - 0x019800]; + + /* 0x020000-0x027FFF -- PCI Device Configuration Spaces */ + union { + u8 c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */ + u16 s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */ + u32 l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */ + u64 d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */ + union { + u8 c[0x100 / 1]; + u16 s[0x100 / 2]; + u32 l[0x100 / 4]; + u64 d[0x100 / 8]; + } f[8]; + } p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */ + + /* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */ + union { + u8 c[0x1000 / 1]; /* 0x028000-0x029000 */ + u16 s[0x1000 / 2]; /* 0x028000-0x029000 */ + u32 l[0x1000 / 4]; /* 0x028000-0x029000 */ + u64 d[0x1000 / 8]; /* 0x028000-0x029000 */ + union { + u8 c[0x100 / 1]; + u16 s[0x100 / 2]; + u32 l[0x100 / 4]; + u64 d[0x100 / 8]; + } f[8]; + } p_type1_cfg; /* 0x028000-0x029000 */ + + char _pad_029000[0x030000-0x029000]; + + /* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */ + union { + u8 c[8 / 1]; + u16 s[8 / 2]; + u32 l[8 / 4]; + u64 d[8 / 8]; + } p_pci_iack; /* 0x030000-0x030007 */ + + char _pad_030007[0x040000-0x030008]; + + /* 0x040000-0x030007 -- PCIX Special Cycle */ + union { + u8 c[8 / 1]; + u16 s[8 / 2]; + u32 l[8 / 4]; + u64 d[8 / 8]; + } p_pcix_cycle; /* 0x040000-0x040007 */ +}; + +#endif /* _ASM_IA64_SN_PCI_PIC_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/shub_mmr.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/shub_mmr.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,502 @@ +/* + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2001-2005 Silicon Graphics, Inc. All rights reserved. 
+ */ + +#ifndef _ASM_IA64_SN_SHUB_MMR_H +#define _ASM_IA64_SN_SHUB_MMR_H + +/* ==================================================================== */ +/* Register "SH_IPI_INT" */ +/* SHub Inter-Processor Interrupt Registers */ +/* ==================================================================== */ +#define SH1_IPI_INT __IA64_UL_CONST(0x0000000110000380) +#define SH2_IPI_INT __IA64_UL_CONST(0x0000000010000380) + +/* SH_IPI_INT_TYPE */ +/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ +#define SH_IPI_INT_TYPE_SHFT 0 +#define SH_IPI_INT_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) + +/* SH_IPI_INT_AGT */ +/* Description: Agent, must be 0 for SHub */ +#define SH_IPI_INT_AGT_SHFT 3 +#define SH_IPI_INT_AGT_MASK __IA64_UL_CONST(0x0000000000000008) + +/* SH_IPI_INT_PID */ +/* Description: Processor ID, same setting as on targeted McKinley */ +#define SH_IPI_INT_PID_SHFT 4 +#define SH_IPI_INT_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) + +/* SH_IPI_INT_BASE */ +/* Description: Optional interrupt vector area, 2MB aligned */ +#define SH_IPI_INT_BASE_SHFT 21 +#define SH_IPI_INT_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) + +/* SH_IPI_INT_IDX */ +/* Description: Targeted McKinley interrupt vector */ +#define SH_IPI_INT_IDX_SHFT 52 +#define SH_IPI_INT_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) + +/* SH_IPI_INT_SEND */ +/* Description: Send Interrupt Message to PI, This generates a puls */ +#define SH_IPI_INT_SEND_SHFT 63 +#define SH_IPI_INT_SEND_MASK __IA64_UL_CONST(0x8000000000000000) + +/* ==================================================================== */ +/* Register "SH_EVENT_OCCURRED" */ +/* SHub Interrupt Event Occurred */ +/* ==================================================================== */ +#define SH1_EVENT_OCCURRED __IA64_UL_CONST(0x0000000110010000) +#define SH1_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000110010008) +#define SH2_EVENT_OCCURRED __IA64_UL_CONST(0x0000000010010000) +#define SH2_EVENT_OCCURRED_ALIAS __IA64_UL_CONST(0x0000000010010008) + +/* ==================================================================== */ +/* Register "SH_PI_CAM_CONTROL" */ +/* CRB CAM MMR Access Control */ +/* ==================================================================== */ +#define SH1_PI_CAM_CONTROL __IA64_UL_CONST(0x0000000120050300) + +/* ==================================================================== */ +/* Register "SH_SHUB_ID" */ +/* SHub ID Number */ +/* ==================================================================== */ +#define SH1_SHUB_ID __IA64_UL_CONST(0x0000000110060580) +#define SH1_SHUB_ID_REVISION_SHFT 28 +#define SH1_SHUB_ID_REVISION_MASK __IA64_UL_CONST(0x00000000f0000000) + +/* ==================================================================== */ +/* Register "SH_RTC" */ +/* Real-time Clock */ +/* ==================================================================== */ +#define SH1_RTC __IA64_UL_CONST(0x00000001101c0000) +#define SH2_RTC __IA64_UL_CONST(0x00000002101c0000) +#define SH_RTC_MASK __IA64_UL_CONST(0x007fffffffffffff) + +/* ==================================================================== */ +/* Register "SH_PIO_WRITE_STATUS_0|1" */ +/* PIO Write Status for CPU 0 & 1 */ +/* ==================================================================== */ +#define SH1_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000120070200) +#define SH1_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000120070280) +#define SH2_PIO_WRITE_STATUS_0 __IA64_UL_CONST(0x0000000020070200) +#define SH2_PIO_WRITE_STATUS_1 __IA64_UL_CONST(0x0000000020070280) 
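The *_SHFT/*_MASK pairs in this header all follow one convention: shift a field value into position, then mask it to the field's width. As a minimal sketch of that pattern, using only the SH_IPI_INT field macros defined above (the function name and its use are illustrative, not part of this patch):

	/* Illustrative only: compose a raw SH_IPI_INT value that targets
	 * processor `pid' with interrupt vector `vector'.  TYPE and AGT are
	 * left at 0 (normal INT, SHub agent). */
	static inline unsigned long
	sn2_ipi_int_value(unsigned long pid, unsigned long vector)
	{
		unsigned long val = 0;

		val |= (pid << SH_IPI_INT_PID_SHFT) & SH_IPI_INT_PID_MASK;
		val |= (vector << SH_IPI_INT_IDX_SHFT) & SH_IPI_INT_IDX_MASK;
		val |= SH_IPI_INT_SEND_MASK;	/* bit 63: trigger the send */
		return val;
	}

Writing such a value to SH1_IPI_INT or SH2_IPI_INT (chosen at runtime, see the shubmmr() helper near the end of this header) is what actually raises the IPI; that code lives elsewhere in the SN2 support and is not part of this file.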
+#define SH2_PIO_WRITE_STATUS_2 __IA64_UL_CONST(0x0000000020070300) +#define SH2_PIO_WRITE_STATUS_3 __IA64_UL_CONST(0x0000000020070380) + +/* SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK */ +/* Description: Deadlock response detected */ +#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT 1 +#define SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_MASK \ + __IA64_UL_CONST(0x0000000000000002) + +/* SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT */ +/* Description: Count of currently pending PIO writes */ +#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_SHFT 56 +#define SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK \ + __IA64_UL_CONST(0x3f00000000000000) + +/* ==================================================================== */ +/* Register "SH_PIO_WRITE_STATUS_0_ALIAS" */ +/* ==================================================================== */ +#define SH1_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000120070208) +#define SH2_PIO_WRITE_STATUS_0_ALIAS __IA64_UL_CONST(0x0000000020070208) + +/* ==================================================================== */ +/* Register "SH_EVENT_OCCURRED" */ +/* SHub Interrupt Event Occurred */ +/* ==================================================================== */ +/* SH_EVENT_OCCURRED_UART_INT */ +/* Description: Pending Junk Bus UART Interrupt */ +#define SH_EVENT_OCCURRED_UART_INT_SHFT 20 +#define SH_EVENT_OCCURRED_UART_INT_MASK __IA64_UL_CONST(0x0000000000100000) + +/* SH_EVENT_OCCURRED_IPI_INT */ +/* Description: Pending IPI Interrupt */ +#define SH_EVENT_OCCURRED_IPI_INT_SHFT 28 +#define SH_EVENT_OCCURRED_IPI_INT_MASK __IA64_UL_CONST(0x0000000010000000) + +/* SH_EVENT_OCCURRED_II_INT0 */ +/* Description: Pending II 0 Interrupt */ +#define SH_EVENT_OCCURRED_II_INT0_SHFT 29 +#define SH_EVENT_OCCURRED_II_INT0_MASK __IA64_UL_CONST(0x0000000020000000) + +/* SH_EVENT_OCCURRED_II_INT1 */ +/* Description: Pending II 1 Interrupt */ +#define SH_EVENT_OCCURRED_II_INT1_SHFT 30 +#define SH_EVENT_OCCURRED_II_INT1_MASK __IA64_UL_CONST(0x0000000040000000) + +/* SH2_EVENT_OCCURRED_EXTIO_INT2 */ +/* Description: Pending SHUB 2 EXT IO INT2 */ +#define SH2_EVENT_OCCURRED_EXTIO_INT2_SHFT 33 +#define SH2_EVENT_OCCURRED_EXTIO_INT2_MASK __IA64_UL_CONST(0x0000000200000000) + +/* SH2_EVENT_OCCURRED_EXTIO_INT3 */ +/* Description: Pending SHUB 2 EXT IO INT3 */ +#define SH2_EVENT_OCCURRED_EXTIO_INT3_SHFT 34 +#define SH2_EVENT_OCCURRED_EXTIO_INT3_MASK __IA64_UL_CONST(0x0000000400000000) + +#define SH_ALL_INT_MASK \ + (SH_EVENT_OCCURRED_UART_INT_MASK | SH_EVENT_OCCURRED_IPI_INT_MASK | \ + SH_EVENT_OCCURRED_II_INT0_MASK | SH_EVENT_OCCURRED_II_INT1_MASK | \ + SH_EVENT_OCCURRED_II_INT1_MASK | SH2_EVENT_OCCURRED_EXTIO_INT2_MASK | \ + SH2_EVENT_OCCURRED_EXTIO_INT3_MASK) + + +/* ==================================================================== */ +/* LEDS */ +/* ==================================================================== */ +#define SH1_REAL_JUNK_BUS_LED0 0x7fed00000UL +#define SH1_REAL_JUNK_BUS_LED1 0x7fed10000UL +#define SH1_REAL_JUNK_BUS_LED2 0x7fed20000UL +#define SH1_REAL_JUNK_BUS_LED3 0x7fed30000UL + +#define SH2_REAL_JUNK_BUS_LED0 0xf0000000UL +#define SH2_REAL_JUNK_BUS_LED1 0xf0010000UL +#define SH2_REAL_JUNK_BUS_LED2 0xf0020000UL +#define SH2_REAL_JUNK_BUS_LED3 0xf0030000UL + +/* ==================================================================== */ +/* Register "SH1_PTC_0" */ +/* Puge Translation Cache Message Configuration Information */ +/* ==================================================================== */ +#define SH1_PTC_0 __IA64_UL_CONST(0x00000001101a0000) + +/* 
SH1_PTC_0_A */ +/* Description: Type */ +#define SH1_PTC_0_A_SHFT 0 + +/* SH1_PTC_0_PS */ +/* Description: Page Size */ +#define SH1_PTC_0_PS_SHFT 2 + +/* SH1_PTC_0_RID */ +/* Description: Region ID */ +#define SH1_PTC_0_RID_SHFT 8 + +/* SH1_PTC_0_START */ +/* Description: Start */ +#define SH1_PTC_0_START_SHFT 63 + +/* ==================================================================== */ +/* Register "SH1_PTC_1" */ +/* Puge Translation Cache Message Configuration Information */ +/* ==================================================================== */ +#define SH1_PTC_1 __IA64_UL_CONST(0x00000001101a0080) + +/* SH1_PTC_1_START */ +/* Description: PTC_1 Start */ +#define SH1_PTC_1_START_SHFT 63 + +/* ==================================================================== */ +/* Register "SH2_PTC" */ +/* Puge Translation Cache Message Configuration Information */ +/* ==================================================================== */ +#define SH2_PTC __IA64_UL_CONST(0x0000000170000000) + +/* SH2_PTC_A */ +/* Description: Type */ +#define SH2_PTC_A_SHFT 0 + +/* SH2_PTC_PS */ +/* Description: Page Size */ +#define SH2_PTC_PS_SHFT 2 + +/* SH2_PTC_RID */ +/* Description: Region ID */ +#define SH2_PTC_RID_SHFT 4 + +/* SH2_PTC_START */ +/* Description: Start */ +#define SH2_PTC_START_SHFT 63 + +/* SH2_PTC_ADDR_RID */ +/* Description: Region ID */ +#define SH2_PTC_ADDR_SHFT 4 +#define SH2_PTC_ADDR_MASK __IA64_UL_CONST(0x1ffffffffffff000) + +/* ==================================================================== */ +/* Register "SH_RTC1_INT_CONFIG" */ +/* SHub RTC 1 Interrupt Config Registers */ +/* ==================================================================== */ + +#define SH1_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000110001480) +#define SH2_RTC1_INT_CONFIG __IA64_UL_CONST(0x0000000010001480) +#define SH_RTC1_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff) +#define SH_RTC1_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_RTC1_INT_CONFIG_TYPE */ +/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ +#define SH_RTC1_INT_CONFIG_TYPE_SHFT 0 +#define SH_RTC1_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) + +/* SH_RTC1_INT_CONFIG_AGT */ +/* Description: Agent, must be 0 for SHub */ +#define SH_RTC1_INT_CONFIG_AGT_SHFT 3 +#define SH_RTC1_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008) + +/* SH_RTC1_INT_CONFIG_PID */ +/* Description: Processor ID, same setting as on targeted McKinley */ +#define SH_RTC1_INT_CONFIG_PID_SHFT 4 +#define SH_RTC1_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) + +/* SH_RTC1_INT_CONFIG_BASE */ +/* Description: Optional interrupt vector area, 2MB aligned */ +#define SH_RTC1_INT_CONFIG_BASE_SHFT 21 +#define SH_RTC1_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) + +/* SH_RTC1_INT_CONFIG_IDX */ +/* Description: Targeted McKinley interrupt vector */ +#define SH_RTC1_INT_CONFIG_IDX_SHFT 52 +#define SH_RTC1_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) + +/* ==================================================================== */ +/* Register "SH_RTC1_INT_ENABLE" */ +/* SHub RTC 1 Interrupt Enable Registers */ +/* ==================================================================== */ + +#define SH1_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000110001500) +#define SH2_RTC1_INT_ENABLE __IA64_UL_CONST(0x0000000010001500) +#define SH_RTC1_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001) +#define SH_RTC1_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_RTC1_INT_ENABLE_RTC1_ENABLE */ +/* 
Description: Enable RTC 1 Interrupt */ +#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_SHFT 0 +#define SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK \ + __IA64_UL_CONST(0x0000000000000001) + +/* ==================================================================== */ +/* Register "SH_RTC2_INT_CONFIG" */ +/* SHub RTC 2 Interrupt Config Registers */ +/* ==================================================================== */ + +#define SH1_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000110001580) +#define SH2_RTC2_INT_CONFIG __IA64_UL_CONST(0x0000000010001580) +#define SH_RTC2_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff) +#define SH_RTC2_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_RTC2_INT_CONFIG_TYPE */ +/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ +#define SH_RTC2_INT_CONFIG_TYPE_SHFT 0 +#define SH_RTC2_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) + +/* SH_RTC2_INT_CONFIG_AGT */ +/* Description: Agent, must be 0 for SHub */ +#define SH_RTC2_INT_CONFIG_AGT_SHFT 3 +#define SH_RTC2_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008) + +/* SH_RTC2_INT_CONFIG_PID */ +/* Description: Processor ID, same setting as on targeted McKinley */ +#define SH_RTC2_INT_CONFIG_PID_SHFT 4 +#define SH_RTC2_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) + +/* SH_RTC2_INT_CONFIG_BASE */ +/* Description: Optional interrupt vector area, 2MB aligned */ +#define SH_RTC2_INT_CONFIG_BASE_SHFT 21 +#define SH_RTC2_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) + +/* SH_RTC2_INT_CONFIG_IDX */ +/* Description: Targeted McKinley interrupt vector */ +#define SH_RTC2_INT_CONFIG_IDX_SHFT 52 +#define SH_RTC2_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) + +/* ==================================================================== */ +/* Register "SH_RTC2_INT_ENABLE" */ +/* SHub RTC 2 Interrupt Enable Registers */ +/* ==================================================================== */ + +#define SH1_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000110001600) +#define SH2_RTC2_INT_ENABLE __IA64_UL_CONST(0x0000000010001600) +#define SH_RTC2_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001) +#define SH_RTC2_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_RTC2_INT_ENABLE_RTC2_ENABLE */ +/* Description: Enable RTC 2 Interrupt */ +#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_SHFT 0 +#define SH_RTC2_INT_ENABLE_RTC2_ENABLE_MASK \ + __IA64_UL_CONST(0x0000000000000001) + +/* ==================================================================== */ +/* Register "SH_RTC3_INT_CONFIG" */ +/* SHub RTC 3 Interrupt Config Registers */ +/* ==================================================================== */ + +#define SH1_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000110001680) +#define SH2_RTC3_INT_CONFIG __IA64_UL_CONST(0x0000000010001680) +#define SH_RTC3_INT_CONFIG_MASK __IA64_UL_CONST(0x0ff3ffffffefffff) +#define SH_RTC3_INT_CONFIG_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_RTC3_INT_CONFIG_TYPE */ +/* Description: Type of Interrupt: 0=INT, 2=PMI, 4=NMI, 5=INIT */ +#define SH_RTC3_INT_CONFIG_TYPE_SHFT 0 +#define SH_RTC3_INT_CONFIG_TYPE_MASK __IA64_UL_CONST(0x0000000000000007) + +/* SH_RTC3_INT_CONFIG_AGT */ +/* Description: Agent, must be 0 for SHub */ +#define SH_RTC3_INT_CONFIG_AGT_SHFT 3 +#define SH_RTC3_INT_CONFIG_AGT_MASK __IA64_UL_CONST(0x0000000000000008) + +/* SH_RTC3_INT_CONFIG_PID */ +/* Description: Processor ID, same setting as on targeted McKinley */ +#define SH_RTC3_INT_CONFIG_PID_SHFT 4 +#define SH_RTC3_INT_CONFIG_PID_MASK __IA64_UL_CONST(0x00000000000ffff0) 
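The three RTC interrupt blocks (RTC1/RTC2/RTC3) differ only in their register offsets, so configuring any of them reduces to the same shift-and-mask composition plus a single enable bit. A minimal sketch using the RTC1 macros already defined above (the helper name is hypothetical, and the actual MMR store is left to the platform's accessor, which this patch does not define):

	/* Illustrative only: config value routing RTC1 compare interrupts
	 * to processor `pid' with vector `vector' (TYPE = 0 -> INT, AGT = 0). */
	static inline unsigned long
	sn2_rtc1_int_config_value(unsigned long pid, unsigned long vector)
	{
		unsigned long cfg = 0;

		cfg |= (pid << SH_RTC1_INT_CONFIG_PID_SHFT) &
			SH_RTC1_INT_CONFIG_PID_MASK;
		cfg |= (vector << SH_RTC1_INT_CONFIG_IDX_SHFT) &
			SH_RTC1_INT_CONFIG_IDX_MASK;
		return cfg;
	}

	/* Enabling the interrupt is then a write of
	 * SH_RTC1_INT_ENABLE_RTC1_ENABLE_MASK to the RTC1 enable register. */

The RTC2 and RTC3 variants are identical apart from the SH_RTC2_*/SH_RTC3_* names.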
+ +/* SH_RTC3_INT_CONFIG_BASE */ +/* Description: Optional interrupt vector area, 2MB aligned */ +#define SH_RTC3_INT_CONFIG_BASE_SHFT 21 +#define SH_RTC3_INT_CONFIG_BASE_MASK __IA64_UL_CONST(0x0003ffffffe00000) + +/* SH_RTC3_INT_CONFIG_IDX */ +/* Description: Targeted McKinley interrupt vector */ +#define SH_RTC3_INT_CONFIG_IDX_SHFT 52 +#define SH_RTC3_INT_CONFIG_IDX_MASK __IA64_UL_CONST(0x0ff0000000000000) + +/* ==================================================================== */ +/* Register "SH_RTC3_INT_ENABLE" */ +/* SHub RTC 3 Interrupt Enable Registers */ +/* ==================================================================== */ + +#define SH1_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000110001700) +#define SH2_RTC3_INT_ENABLE __IA64_UL_CONST(0x0000000010001700) +#define SH_RTC3_INT_ENABLE_MASK __IA64_UL_CONST(0x0000000000000001) +#define SH_RTC3_INT_ENABLE_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_RTC3_INT_ENABLE_RTC3_ENABLE */ +/* Description: Enable RTC 3 Interrupt */ +#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_SHFT 0 +#define SH_RTC3_INT_ENABLE_RTC3_ENABLE_MASK \ + __IA64_UL_CONST(0x0000000000000001) + +/* SH_EVENT_OCCURRED_RTC1_INT */ +/* Description: Pending RTC 1 Interrupt */ +#define SH_EVENT_OCCURRED_RTC1_INT_SHFT 24 +#define SH_EVENT_OCCURRED_RTC1_INT_MASK __IA64_UL_CONST(0x0000000001000000) + +/* SH_EVENT_OCCURRED_RTC2_INT */ +/* Description: Pending RTC 2 Interrupt */ +#define SH_EVENT_OCCURRED_RTC2_INT_SHFT 25 +#define SH_EVENT_OCCURRED_RTC2_INT_MASK __IA64_UL_CONST(0x0000000002000000) + +/* SH_EVENT_OCCURRED_RTC3_INT */ +/* Description: Pending RTC 3 Interrupt */ +#define SH_EVENT_OCCURRED_RTC3_INT_SHFT 26 +#define SH_EVENT_OCCURRED_RTC3_INT_MASK __IA64_UL_CONST(0x0000000004000000) + +/* ==================================================================== */ +/* Register "SH_IPI_ACCESS" */ +/* CPU interrupt Access Permission Bits */ +/* ==================================================================== */ + +#define SH1_IPI_ACCESS __IA64_UL_CONST(0x0000000110060480) +#define SH2_IPI_ACCESS0 __IA64_UL_CONST(0x0000000010060c00) +#define SH2_IPI_ACCESS1 __IA64_UL_CONST(0x0000000010060c80) +#define SH2_IPI_ACCESS2 __IA64_UL_CONST(0x0000000010060d00) +#define SH2_IPI_ACCESS3 __IA64_UL_CONST(0x0000000010060d80) + +/* ==================================================================== */ +/* Register "SH_INT_CMPB" */ +/* RTC Compare Value for Processor B */ +/* ==================================================================== */ + +#define SH1_INT_CMPB __IA64_UL_CONST(0x00000001101b0080) +#define SH2_INT_CMPB __IA64_UL_CONST(0x00000000101b0080) +#define SH_INT_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff) +#define SH_INT_CMPB_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_INT_CMPB_REAL_TIME_CMPB */ +/* Description: Real Time Clock Compare */ +#define SH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 +#define SH_INT_CMPB_REAL_TIME_CMPB_MASK __IA64_UL_CONST(0x007fffffffffffff) + +/* ==================================================================== */ +/* Register "SH_INT_CMPC" */ +/* RTC Compare Value for Processor C */ +/* ==================================================================== */ + +#define SH1_INT_CMPC __IA64_UL_CONST(0x00000001101b0100) +#define SH2_INT_CMPC __IA64_UL_CONST(0x00000000101b0100) +#define SH_INT_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff) +#define SH_INT_CMPC_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_INT_CMPC_REAL_TIME_CMPC */ +/* Description: Real Time Clock Compare */ +#define SH_INT_CMPC_REAL_TIME_CMPC_SHFT 0 +#define 
SH_INT_CMPC_REAL_TIME_CMPC_MASK __IA64_UL_CONST(0x007fffffffffffff) + +/* ==================================================================== */ +/* Register "SH_INT_CMPD" */ +/* RTC Compare Value for Processor D */ +/* ==================================================================== */ + +#define SH1_INT_CMPD __IA64_UL_CONST(0x00000001101b0180) +#define SH2_INT_CMPD __IA64_UL_CONST(0x00000000101b0180) +#define SH_INT_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff) +#define SH_INT_CMPD_INIT __IA64_UL_CONST(0x0000000000000000) + +/* SH_INT_CMPD_REAL_TIME_CMPD */ +/* Description: Real Time Clock Compare */ +#define SH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 +#define SH_INT_CMPD_REAL_TIME_CMPD_MASK __IA64_UL_CONST(0x007fffffffffffff) + +/* ==================================================================== */ +/* Register "SH_MD_DQLP_MMR_DIR_PRIVEC0" */ +/* privilege vector for acc=0 */ +/* ==================================================================== */ +#define SH1_MD_DQLP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100030300) + +/* ==================================================================== */ +/* Register "SH_MD_DQRP_MMR_DIR_PRIVEC0" */ +/* privilege vector for acc=0 */ +/* ==================================================================== */ +#define SH1_MD_DQRP_MMR_DIR_PRIVEC0 __IA64_UL_CONST(0x0000000100050300) + +/* ==================================================================== */ +/* Some MMRs are functionally identical (or close enough) on both SHUB1 */ +/* and SHUB2 that it makes sense to define a geberic name for the MMR. */ +/* It is acceptible to use (for example) SH_IPI_INT to reference the */ +/* the IPI MMR. The value of SH_IPI_INT is determined at runtime based */ +/* on the type of the SHUB. Do not use these #defines in performance */ +/* critical code or loops - there is a small performance penalty. */ +/* ==================================================================== */ +#define shubmmr(a,b) (is_shub2() ? 
a##2_##b : a##1_##b) + +#define SH_REAL_JUNK_BUS_LED0 shubmmr(SH, REAL_JUNK_BUS_LED0) +#define SH_IPI_INT shubmmr(SH, IPI_INT) +#define SH_EVENT_OCCURRED shubmmr(SH, EVENT_OCCURRED) +#define SH_EVENT_OCCURRED_ALIAS shubmmr(SH, EVENT_OCCURRED_ALIAS) +#define SH_RTC shubmmr(SH, RTC) +#define SH_RTC1_INT_CONFIG shubmmr(SH, RTC1_INT_CONFIG) +#define SH_RTC1_INT_ENABLE shubmmr(SH, RTC1_INT_ENABLE) +#define SH_RTC2_INT_CONFIG shubmmr(SH, RTC2_INT_CONFIG) +#define SH_RTC2_INT_ENABLE shubmmr(SH, RTC2_INT_ENABLE) +#define SH_RTC3_INT_CONFIG shubmmr(SH, RTC3_INT_CONFIG) +#define SH_RTC3_INT_ENABLE shubmmr(SH, RTC3_INT_ENABLE) +#define SH_INT_CMPB shubmmr(SH, INT_CMPB) +#define SH_INT_CMPC shubmmr(SH, INT_CMPC) +#define SH_INT_CMPD shubmmr(SH, INT_CMPD) + +/* ========================================================================== */ +/* Register "SH2_BT_ENG_CSR_0" */ +/* Engine 0 Control and Status Register */ +/* ========================================================================== */ + +#define SH2_BT_ENG_CSR_0 __IA64_UL_CONST(0x0000000030040000) +#define SH2_BT_ENG_SRC_ADDR_0 __IA64_UL_CONST(0x0000000030040080) +#define SH2_BT_ENG_DEST_ADDR_0 __IA64_UL_CONST(0x0000000030040100) +#define SH2_BT_ENG_NOTIF_ADDR_0 __IA64_UL_CONST(0x0000000030040180) + +/* ========================================================================== */ +/* BTE interfaces 1-3 */ +/* ========================================================================== */ + +#define SH2_BT_ENG_CSR_1 __IA64_UL_CONST(0x0000000030050000) +#define SH2_BT_ENG_CSR_2 __IA64_UL_CONST(0x0000000030060000) +#define SH2_BT_ENG_CSR_3 __IA64_UL_CONST(0x0000000030070000) + +#endif /* _ASM_IA64_SN_SHUB_MMR_H */ diff -r c0d41ac21486 -r de3d55c10f16 xen/include/asm-ia64/linux/asm/sn/shubio.h --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/xen/include/asm-ia64/linux/asm/sn/shubio.h Wed Dec 13 11:08:40 2006 -0700 @@ -0,0 +1,3358 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved. + */ + +#ifndef _ASM_IA64_SN_SHUBIO_H +#define _ASM_IA64_SN_SHUBIO_H + +#define HUB_WIDGET_ID_MAX 0xf +#define IIO_NUM_ITTES 7 +#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1) + +#define IIO_WID 0x00400000 /* Crosstalk Widget Identification */ + /* This register is also accessible from + * Crosstalk at address 0x0. 
*/ +#define IIO_WSTAT 0x00400008 /* Crosstalk Widget Status */ +#define IIO_WCR 0x00400020 /* Crosstalk Widget Control Register */ +#define IIO_ILAPR 0x00400100 /* IO Local Access Protection Register */ +#define IIO_ILAPO 0x00400108 /* IO Local Access Protection Override */ +#define IIO_IOWA 0x00400110 /* IO Outbound Widget Access */ +#define IIO_IIWA 0x00400118 /* IO Inbound Widget Access */ +#define IIO_IIDEM 0x00400120 /* IO Inbound Device Error Mask */ +#define IIO_ILCSR 0x00400128 /* IO LLP Control and Status Register */ +#define IIO_ILLR 0x00400130 /* IO LLP Log Register */ +#define IIO_IIDSR 0x00400138 /* IO Interrupt Destination */ + +#define IIO_IGFX0 0x00400140 /* IO Graphics Node-Widget Map 0 */ +#define IIO_IGFX1 0x00400148 /* IO Graphics Node-Widget Map 1 */ + +#define IIO_ISCR0 0x00400150 /* IO Scratch Register 0 */ +#define IIO_ISCR1 0x00400158 /* IO Scratch Register 1 */ + +#define IIO_ITTE1 0x00400160 /* IO Translation Table Entry 1 */ +#define IIO_ITTE2 0x00400168 /* IO Translation Table Entry 2 */ +#define IIO_ITTE3 0x00400170 /* IO Translation Table Entry 3 */ +#define IIO_ITTE4 0x00400178 /* IO Translation Table Entry 4 */ +#define IIO_ITTE5 0x00400180 /* IO Translation Table Entry 5 */ +#define IIO_ITTE6 0x00400188 /* IO Translation Table Entry 6 */ +#define IIO_ITTE7 0x00400190 /* IO Translation Table Entry 7 */ + +#define IIO_IPRB0 0x00400198 /* IO PRB Entry 0 */ +#define IIO_IPRB8 0x004001A0 /* IO PRB Entry 8 */ +#define IIO_IPRB9 0x004001A8 /* IO PRB Entry 9 */ +#define IIO_IPRBA 0x004001B0 /* IO PRB Entry A */ +#define IIO_IPRBB 0x004001B8 /* IO PRB Entry B */ +#define IIO_IPRBC 0x004001C0 /* IO PRB Entry C */ +#define IIO_IPRBD 0x004001C8 /* IO PRB Entry D */ +#define IIO_IPRBE 0x004001D0 /* IO PRB Entry E */ +#define IIO_IPRBF 0x004001D8 /* IO PRB Entry F */ + +#define IIO_IXCC 0x004001E0 /* IO Crosstalk Credit Count Timeout */ +#define IIO_IMEM 0x004001E8 /* IO Miscellaneous Error Mask */ +#define IIO_IXTT 0x004001F0 /* IO Crosstalk Timeout Threshold */ +#define IIO_IECLR 0x004001F8 /* IO Error Clear Register */ +#define IIO_IBCR 0x00400200 /* IO BTE Control Register */ + +#define IIO_IXSM 0x00400208 /* IO Crosstalk Spurious Message */ +#define IIO_IXSS 0x00400210 /* IO Crosstalk Spurious Sideband */ + +#define IIO_ILCT 0x00400218 /* IO LLP Channel Test */ + +#define IIO_IIEPH1 0x00400220 /* IO Incoming Error Packet Header, Part 1 */ +#define IIO_IIEPH2 0x00400228 /* IO Incoming Error Packet Header, Part 2 */ + +#define IIO_ISLAPR 0x00400230 /* IO SXB Local Access Protection Regster */ +#define IIO_ISLAPO 0x00400238 /* IO SXB Local Access Protection Override */ + +#define IIO_IWI 0x00400240 /* IO Wrapper Interrupt Register */ +#define IIO_IWEL 0x00400248 /* IO Wrapper Error Log Register */ +#define IIO_IWC 0x00400250 /* IO Wrapper Control Register */ +#define IIO_IWS 0x00400258 /* IO Wrapper Status Register */ +#define IIO_IWEIM 0x00400260 /* IO Wrapper Error Interrupt Masking Register */ + +#define IIO_IPCA 0x00400300 /* IO PRB Counter Adjust */ + +#define IIO_IPRTE0_A 0x00400308 /* IO PIO Read Address Table Entry 0, Part A */ +#define IIO_IPRTE1_A 0x00400310 /* IO PIO Read Address Table Entry 1, Part A */ +#define IIO_IPRTE2_A 0x00400318 /* IO PIO Read Address Table Entry 2, Part A */ +#define IIO_IPRTE3_A 0x00400320 /* IO PIO Read Address Table Entry 3, Part A */ +#define IIO_IPRTE4_A 0x00400328 /* IO PIO Read Address Table Entry 4, Part A */ +#define IIO_IPRTE5_A 0x00400330 /* IO PIO Read Address Table Entry 5, Part A */ +#define IIO_IPRTE6_A 0x00400338 /* 
IO PIO Read Address Table Entry 6, Part A */ +#define IIO_IPRTE7_A 0x00400340 /* IO PIO Read Address Table Entry 7, Part A */ + +#define IIO_IPRTE0_B 0x00400348 /* IO PIO Read Address Table Entry 0, Part B */ +#define IIO_IPRTE1_B 0x00400350 /* IO PIO Read Address Table Entry 1, Part B */ +#define IIO_IPRTE2_B 0x00400358 /* IO PIO Read Address Table Entry 2, Part B */ +#define IIO_IPRTE3_B 0x00400360 /* IO PIO Read Address Table Entry 3, Part B */ +#define IIO_IPRTE4_B 0x00400368 /* IO PIO Read Address Table Entry 4, Part B */ +#define IIO_IPRTE5_B 0x00400370 /* IO PIO Read Address Table Entry 5, Part B */ +#define IIO_IPRTE6_B 0x00400378 /* IO PIO Read Address Table Entry 6, Part B */ +#define IIO_IPRTE7_B 0x00400380 /* IO PIO Read Address Table Entry 7, Part B */ + +#define IIO_IPDR 0x00400388 /* IO PIO Deallocation Register */ +#define IIO_ICDR 0x00400390 /* IO CRB Entry Deallocation Register */ +#define IIO_IFDR 0x00400398 /* IO IOQ FIFO Depth Register */ +#define IIO_IIAP 0x004003A0 /* IO IIQ Arbitration Parameters */ +#define IIO_ICMR 0x004003A8 /* IO CRB Management Register */ +#define IIO_ICCR 0x004003B0 /* IO CRB Control Register */ +#define IIO_ICTO 0x004003B8 /* IO CRB Timeout */ +#define IIO_ICTP 0x004003C0 /* IO CRB Timeout Prescalar */ + +#define IIO_ICRB0_A 0x00400400 /* IO CRB Entry 0_A */ +#define IIO_ICRB0_B 0x00400408 /* IO CRB Entry 0_B */ +#define IIO_ICRB0_C 0x00400410 /* IO CRB Entry 0_C */ +#define IIO_ICRB0_D 0x00400418 /* IO CRB Entry 0_D */ +#define IIO_ICRB0_E 0x00400420 /* IO CRB Entry 0_E */ + +#define IIO_ICRB1_A 0x00400430 /* IO CRB Entry 1_A */ +#define IIO_ICRB1_B 0x00400438 /* IO CRB Entry 1_B */ +#define IIO_ICRB1_C 0x00400440 /* IO CRB Entry 1_C */ +#define IIO_ICRB1_D 0x00400448 /* IO CRB Entry 1_D */ +#define IIO_ICRB1_E 0x00400450 /* IO CRB Entry 1_E */ + +#define IIO_ICRB2_A 0x00400460 /* IO CRB Entry 2_A */ +#define IIO_ICRB2_B 0x00400468 /* IO CRB Entry 2_B */ +#define IIO_ICRB2_C 0x00400470 /* IO CRB Entry 2_C */ +#define IIO_ICRB2_D 0x00400478 /* IO CRB Entry 2_D */ +#define IIO_ICRB2_E 0x00400480 /* IO CRB Entry 2_E */ + +#define IIO_ICRB3_A 0x00400490 /* IO CRB Entry 3_A */ +#define IIO_ICRB3_B 0x00400498 /* IO CRB Entry 3_B */ +#define IIO_ICRB3_C 0x004004a0 /* IO CRB Entry 3_C */ +#define IIO_ICRB3_D 0x004004a8 /* IO CRB Entry 3_D */ +#define IIO_ICRB3_E 0x004004b0 /* IO CRB Entry 3_E */ + +#define IIO_ICRB4_A 0x004004c0 /* IO CRB Entry 4_A */ +#define IIO_ICRB4_B 0x004004c8 /* IO CRB Entry 4_B */ +#define IIO_ICRB4_C 0x004004d0 /* IO CRB Entry 4_C */ +#define IIO_ICRB4_D 0x004004d8 /* IO CRB Entry 4_D */ +#define IIO_ICRB4_E 0x004004e0 /* IO CRB Entry 4_E */ + +#define IIO_ICRB5_A 0x004004f0 /* IO CRB Entry 5_A */ +#define IIO_ICRB5_B 0x004004f8 /* IO CRB Entry 5_B */ +#define IIO_ICRB5_C 0x00400500 /* IO CRB Entry 5_C */ +#define IIO_ICRB5_D 0x00400508 /* IO CRB Entry 5_D */ +#define IIO_ICRB5_E 0x00400510 /* IO CRB Entry 5_E */ + +#define IIO_ICRB6_A 0x00400520 /* IO CRB Entry 6_A */ +#define IIO_ICRB6_B 0x00400528 /* IO CRB Entry 6_B */ +#define IIO_ICRB6_C 0x00400530 /* IO CRB Entry 6_C */ +#define IIO_ICRB6_D 0x00400538 /* IO CRB Entry 6_D */ +#define IIO_ICRB6_E 0x00400540 /* IO CRB Entry 6_E */ + +#define IIO_ICRB7_A 0x00400550 /* IO CRB Entry 7_A */ +#define IIO_ICRB7_B 0x00400558 /* IO CRB Entry 7_B */ +#define IIO_ICRB7_C 0x00400560 /* IO CRB Entry 7_C */ +#define IIO_ICRB7_D 0x00400568 /* IO CRB Entry 7_D */ +#define IIO_ICRB7_E 0x00400570 /* IO CRB Entry 7_E */ + +#define IIO_ICRB8_A 0x00400580 /* IO CRB Entry 8_A */ +#define 
IIO_ICRB8_B 0x00400588 /* IO CRB Entry 8_B */ +#define IIO_ICRB8_C 0x00400590 /* IO CRB Entry 8_C */ +#define IIO_ICRB8_D 0x00400598 /* IO CRB Entry 8_D */ +#define IIO_ICRB8_E 0x004005a0 /* IO CRB Entry 8_E */ + +#define IIO_ICRB9_A 0x004005b0 /* IO CRB Entry 9_A */ +#define IIO_ICRB9_B 0x004005b8 /* IO CRB Entry 9_B */ +#define IIO_ICRB9_C 0x004005c0 /* IO CRB Entry 9_C */ +#define IIO_ICRB9_D 0x004005c8 /* IO CRB Entry 9_D */ +#define IIO_ICRB9_E 0x004005d0 /* IO CRB Entry 9_E */ + +#define IIO_ICRBA_A 0x004005e0 /* IO CRB Entry A_A */ +#define IIO_ICRBA_B 0x004005e8 /* IO CRB Entry A_B */ +#define IIO_ICRBA_C 0x004005f0 /* IO CRB Entry A_C */ +#define IIO_ICRBA_D 0x004005f8 /* IO CRB Entry A_D */ +#define IIO_ICRBA_E 0x00400600 /* IO CRB Entry A_E */ + +#define IIO_ICRBB_A 0x00400610 /* IO CRB Entry B_A */ +#define IIO_ICRBB_B 0x00400618 /* IO CRB Entry B_B */ +#define IIO_ICRBB_C 0x00400620 /* IO CRB Entry B_C */ +#define IIO_ICRBB_D 0x00400628 /* IO CRB Entry B_D */ +#define IIO_ICRBB_E 0x00400630 /* IO CRB Entry B_E */ + +#define IIO_ICRBC_A 0x00400640 /* IO CRB Entry C_A */ +#define IIO_ICRBC_B 0x00400648 /* IO CRB Entry C_B */ +#define IIO_ICRBC_C 0x00400650 /* IO CRB Entry C_C */ +#define IIO_ICRBC_D 0x00400658 /* IO CRB Entry C_D */ +#define IIO_ICRBC_E 0x00400660 /* IO CRB Entry C_E */ + +#define IIO_ICRBD_A 0x00400670 /* IO CRB Entry D_A */ +#define IIO_ICRBD_B 0x00400678 /* IO CRB Entry D_B */ +#define IIO_ICRBD_C 0x00400680 /* IO CRB Entry D_C */ +#define IIO_ICRBD_D 0x00400688 /* IO CRB Entry D_D */ +#define IIO_ICRBD_E 0x00400690 /* IO CRB Entry D_E */ + +#define IIO_ICRBE_A 0x004006a0 /* IO CRB Entry E_A */ +#define IIO_ICRBE_B 0x004006a8 /* IO CRB Entry E_B */ +#define IIO_ICRBE_C 0x004006b0 /* IO CRB Entry E_C */ +#define IIO_ICRBE_D 0x004006b8 /* IO CRB Entry E_D */ +#define IIO_ICRBE_E 0x004006c0 /* IO CRB Entry E_E */ + +#define IIO_ICSML 0x00400700 /* IO CRB Spurious Message Low */ +#define IIO_ICSMM 0x00400708 /* IO CRB Spurious Message Middle */ +#define IIO_ICSMH 0x00400710 /* IO CRB Spurious Message High */ + +#define IIO_IDBSS 0x00400718 /* IO Debug Submenu Select */ + +#define IIO_IBLS0 0x00410000 /* IO BTE Length Status 0 */ +#define IIO_IBSA0 0x00410008 /* IO BTE Source Address 0 */ +#define IIO_IBDA0 0x00410010 /* IO BTE Destination Address 0 */ +#define IIO_IBCT0 0x00410018 /* IO BTE Control Terminate 0 */ +#define IIO_IBNA0 0x00410020 /* IO BTE Notification Address 0 */ +#define IIO_IBIA0 0x00410028 /* IO BTE Interrupt Address 0 */ +#define IIO_IBLS1 0x00420000 /* IO BTE Length Status 1 */ +#define IIO_IBSA1 0x00420008 /* IO BTE Source Address 1 */ +#define IIO_IBDA1 0x00420010 /* IO BTE Destination Address 1 */ +#define IIO_IBCT1 0x00420018 /* IO BTE Control Terminate 1 */ +#define IIO_IBNA1 0x00420020 /* IO BTE Notification Address 1 */ +#define IIO_IBIA1 0x00420028 /* IO BTE Interrupt Address 1 */ + +#define IIO_IPCR 0x00430000 /* IO Performance Control */ +#define IIO_IPPR 0x00430008 /* IO Performance Profiling */ + +/************************************************************************ + * * + * Description: This register echoes some information from the * + * LB_REV_ID register. It is available through Crosstalk as described * + * above. The REV_NUM and MFG_NUM fields receive their values from * + * the REVISION and MANUFACTURER fields in the LB_REV_ID register. * + * The PART_NUM field's value is the Crosstalk device ID number that * + * Steve Miller assigned to the SHub chip. 
* + * * + ************************************************************************/ + +typedef union ii_wid_u { + u64 ii_wid_regval; + struct { + u64 w_rsvd_1:1; + u64 w_mfg_num:11; + u64 w_part_num:16; + u64 w_rev_num:4; + u64 w_rsvd:32; + } ii_wid_fld_s; +} ii_wid_u_t; + +/************************************************************************ + * * + * The fields in this register are set upon detection of an error * + * and cleared by various mechanisms, as explained in the * + * description. * + * * + ************************************************************************/ + +typedef union ii_wstat_u { + u64 ii_wstat_regval; + struct { + u64 w_pending:4; + u64 w_xt_crd_to:1; + u64 w_xt_tail_to:1; + u64 w_rsvd_3:3; + u64 w_tx_mx_rty:1; + u64 w_rsvd_2:6; + u64 w_llp_tx_cnt:8; + u64 w_rsvd_1:8; + u64 w_crazy:1; + u64 w_rsvd:31; + } ii_wstat_fld_s; +} ii_wstat_u_t; + +/************************************************************************ + * * + * Description: This is a read-write enabled register. It controls * + * various aspects of the Crosstalk flow control. * + * * + ************************************************************************/ + +typedef union ii_wcr_u { + u64 ii_wcr_regval; + struct { + u64 w_wid:4; + u64 w_tag:1; + u64 w_rsvd_1:8; + u64 w_dst_crd:3; + u64 w_f_bad_pkt:1; + u64 w_dir_con:1; + u64 w_e_thresh:5; + u64 w_rsvd:41; + } ii_wcr_fld_s; +} ii_wcr_u_t; + +/************************************************************************ + * * + * Description: This register's value is a bit vector that guards * + * access to local registers within the II as well as to external * + * Crosstalk widgets. Each bit in the register corresponds to a * + * particular region in the system; a region consists of one, two or * + * four nodes (depending on the value of the REGION_SIZE field in the * + * LB_REV_ID register, which is documented in Section 8.3.1.1). The * + * protection provided by this register applies to PIO read * + * operations as well as PIO write operations. The II will perform a * + * PIO read or write request only if the bit for the requestor's * + * region is set; otherwise, the II will not perform the requested * + * operation and will return an error response. When a PIO read or * + * write request targets an external Crosstalk widget, then not only * + * must the bit for the requestor's region be set in the ILAPR, but * + * also the target widget's bit in the IOWA register must be set in * + * order for the II to perform the requested operation; otherwise, * + * the II will return an error response. Hence, the protection * + * provided by the IOWA register supplements the protection provided * + * by the ILAPR for requests that target external Crosstalk widgets. * + * This register itself can be accessed only by the nodes whose * + * region ID bits are enabled in this same register. It can also be * + * accessed through the IAlias space by the local processors. * + * The reset value of this register allows access by all nodes. * + * * + ************************************************************************/ + +typedef union ii_ilapr_u { + u64 ii_ilapr_regval; + struct { + u64 i_region:64; + } ii_ilapr_fld_s; +} ii_ilapr_u_t; + +/************************************************************************ + * * + * Description: A write to this register of the 64-bit value * + * "SGIrules" in ASCII, will cause the bit in the ILAPR register * + * corresponding to the region of the requestor to be set (allow * + * access). 
A write of any other value will be ignored. Access * + * protection for this register is "SGIrules". * + * This register can also be accessed through the IAlias space. * + * However, this access will not change the access permissions in the * + * ILAPR. * + * * + ************************************************************************/ + +typedef union ii_ilapo_u { + u64 ii_ilapo_regval; + struct { + u64 i_io_ovrride:64; + } ii_ilapo_fld_s; +} ii_ilapo_u_t; + +/************************************************************************ + * * + * This register qualifies all the PIO and Graphics writes launched * + * from the SHUB towards a widget. * + * * + ************************************************************************/ + +typedef union ii_iowa_u { + u64 ii_iowa_regval; + struct { + u64 i_w0_oac:1; + u64 i_rsvd_1:7; + u64 i_wx_oac:8; + u64 i_rsvd:48; + } ii_iowa_fld_s; +} ii_iowa_u_t; + +/************************************************************************ + * * + * Description: This register qualifies all the requests launched * + * from a widget towards the Shub. This register is intended to be * + * used by software in case of misbehaving widgets. * + * * + * * + ************************************************************************/ + +typedef union ii_iiwa_u { + u64 ii_iiwa_regval; + struct { + u64 i_w0_iac:1; + u64 i_rsvd_1:7; + u64 i_wx_iac:8; + u64 i_rsvd:48; + } ii_iiwa_fld_s; +} ii_iiwa_u_t; + +/************************************************************************ + * * + * Description: This register qualifies all the operations launched * + * from a widget towards the SHub. It allows individual access * + * control for up to 8 devices per widget. A device refers to * + * individual DMA master hosted by a widget. * + * The bits in each field of this register are cleared by the Shub * + * upon detection of an error which requires the device to be * + * disabled. These fields assume that 0=TNUM=7 (i.e., Bridge-centric * + * Crosstalk). Whether or not a device has access rights to this * + * Shub is determined by an AND of the device enable bit in the * + * appropriate field of this register and the corresponding bit in * + * the Wx_IAC field (for the widget which this device belongs to). * + * The bits in this field are set by writing a 1 to them. Incoming * + * replies from Crosstalk are not subject to this access control * + * mechanism. * + * * + ************************************************************************/ + +typedef union ii_iidem_u { + u64 ii_iidem_regval; + struct { + u64 i_w8_dxs:8; + u64 i_w9_dxs:8; + u64 i_wa_dxs:8; + u64 i_wb_dxs:8; + u64 i_wc_dxs:8; + u64 i_wd_dxs:8; + u64 i_we_dxs:8; + u64 i_wf_dxs:8; + } ii_iidem_fld_s; +} ii_iidem_u_t; + +/************************************************************************ + * * + * This register contains the various programmable fields necessary * + * for controlling and observing the LLP signals. * + * * + ************************************************************************/ + +typedef union ii_ilcsr_u { + u64 ii_ilcsr_regval; + struct { + u64 i_nullto:6; + u64 i_rsvd_4:2; + u64 i_wrmrst:1; + u64 i_rsvd_3:1; + u64 i_llp_en:1; + u64 i_bm8:1; + u64 i_llp_stat:2; + u64 i_remote_power:1; + u64 i_rsvd_2:1; + u64 i_maxrtry:10; + u64 i_d_avail_sel:2; + u64 i_rsvd_1:4; + u64 i_maxbrst:10; + u64 i_rsvd:22; + + } ii_ilcsr_fld_s; +} ii_ilcsr_u_t; + +/************************************************************************ + * * + * This is simply a status registers that monitors the LLP error * + * rate. 
* + * * + ************************************************************************/ + +typedef union ii_illr_u { + u64 ii_illr_regval; + struct { + u64 i_sn_cnt:16; + u64 i_cb_cnt:16; + u64 i_rsvd:32; + } ii_illr_fld_s; +} ii_illr_u_t; + +/************************************************************************ + * * + * Description: All II-detected non-BTE error interrupts are * + * specified via this register. * + * NOTE: The PI interrupt register address is hardcoded in the II. If * + * PI_ID==0, then the II sends an interrupt request (Duplonet PWRI * + * packet) to address offset 0x0180_0090 within the local register * + * address space of PI0 on the node specified by the NODE field. If * + * PI_ID==1, then the II sends the interrupt request to address * + * offset 0x01A0_0090 within the local register address space of PI1 * + * on the node specified by the NODE field. * + * * + ************************************************************************/ + +typedef union ii_iidsr_u { + u64 ii_iidsr_regval; + struct { + u64 i_level:8; + u64 i_pi_id:1; + u64 i_node:11; + u64 i_rsvd_3:4; + u64 i_enable:1; + u64 i_rsvd_2:3; + u64 i_int_sent:2; + u64 i_rsvd_1:2; + u64 i_pi0_forward_int:1; + u64 i_pi1_forward_int:1; + u64 i_rsvd:30; + } ii_iidsr_fld_s; +} ii_iidsr_u_t; + +/************************************************************************ + * * + * There are two instances of this register. This register is used * + * for matching up the incoming responses from the graphics widget to * + * the processor that initiated the graphics operation. The * + * write-responses are converted to graphics credits and returned to * + * the processor so that the processor interface can manage the flow * + * control. * + * * + ************************************************************************/ + +typedef union ii_igfx0_u { + u64 ii_igfx0_regval; + struct { + u64 i_w_num:4; + u64 i_pi_id:1; + u64 i_n_num:12; + u64 i_p_num:1; + u64 i_rsvd:46; + } ii_igfx0_fld_s; +} ii_igfx0_u_t; + +/************************************************************************ + * * + * There are two instances of this register. This register is used * + * for matching up the incoming responses from the graphics widget to * + * the processor that initiated the graphics operation. The * + * write-responses are converted to graphics credits and returned to * + * the processor so that the processor interface can manage the flow * + * control. * + * * + ************************************************************************/ + +typedef union ii_igfx1_u { + u64 ii_igfx1_regval; + struct { + u64 i_w_num:4; + u64 i_pi_id:1; + u64 i_n_num:12; + u64 i_p_num:1; + u64 i_rsvd:46; + } ii_igfx1_fld_s; +} ii_igfx1_u_t; + +/************************************************************************ + * * + * There are two instances of this registers. These registers are * + * used as scratch registers for software use. * + * * + ************************************************************************/ + +typedef union ii_iscr0_u { + u64 ii_iscr0_regval; + struct { + u64 i_scratch:64; + } ii_iscr0_fld_s; +} ii_iscr0_u_t; + +/************************************************************************ + * * + * There are two instances of this registers. These registers are * + * used as scratch registers for software use. 
* + * * + ************************************************************************/ + +typedef union ii_iscr1_u { + u64 ii_iscr1_regval; + struct { + u64 i_scratch:64; + } ii_iscr1_fld_s; +} ii_iscr1_u_t; + +/************************************************************************ + * * + * Description: There are seven instances of translation table entry * + * registers. Each register maps a Shub Big Window to a 48-bit * + * address on Crosstalk. * + * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * + * number) are used to select one of these 7 registers. The Widget * + * number field is then derived from the W_NUM field for synthesizing * + * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * + * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * + * are padded with zeros. Although the maximum Crosstalk space * + * addressable by the SHub is thus the lower 16 GBytes per widget * + * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this * + * space can be accessed. * + * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * + * Window number) are used to select one of these 7 registers. The * + * Widget number field is then derived from the W_NUM field for * + * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * + * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * + * field is used as Crosstalk[47], and remainder of the Crosstalk * + * address bits (Crosstalk[46:34]) are always zero. While the maximum * + * Crosstalk space addressable by the Shub is thus the lower * + * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * + * of this space can be accessed. * + * * + ************************************************************************/ + +typedef union ii_itte1_u { + u64 ii_itte1_regval; + struct { + u64 i_offset:5; + u64 i_rsvd_1:3; + u64 i_w_num:4; + u64 i_iosp:1; + u64 i_rsvd:51; + } ii_itte1_fld_s; +} ii_itte1_u_t; + +/************************************************************************ + * * + * Description: There are seven instances of translation table entry * + * registers. Each register maps a Shub Big Window to a 48-bit * + * address on Crosstalk. * + * For M-mode (128 nodes, 8 GBytes/node), SysAD[31:29] (Big Window * + * number) are used to select one of these 7 registers. The Widget * + * number field is then derived from the W_NUM field for synthesizing * + * a Crosstalk packet. The 5 bits of OFFSET are concatenated with * + * SysAD[28:0] to form Crosstalk[33:0]. The upper Crosstalk[47:34] * + * are padded with zeros. Although the maximum Crosstalk space * + * addressable by the Shub is thus the lower 16 GBytes per widget * + * (M-mode), however only <SUP >7</SUP>/<SUB >32nds</SUB> of this * + * space can be accessed. * + * For the N-mode (256 nodes, 4 GBytes/node), SysAD[30:28] (Big * + * Window number) are used to select one of these 7 registers. The * + * Widget number field is then derived from the W_NUM field for * + * synthesizing a Crosstalk packet. The 5 bits of OFFSET are * + * concatenated with SysAD[27:0] to form Crosstalk[33:0]. The IOSP * + * field is used as Crosstalk[47], and remainder of the Crosstalk * + * address bits (Crosstalk[46:34]) are always zero. While the maximum * + * Crosstalk space addressable by the Shub is thus the lower * + * 8-GBytes per widget (N-mode), only <SUP >7</SUP>/<SUB >32nds</SUB> * + * of this space can be accessed. 
+
+typedef union ii_itte2_u {
+    u64 ii_itte2_regval;
+    struct {
+        u64 i_offset:5;
+        u64 i_rsvd_1:3;
+        u64 i_w_num:4;
+        u64 i_iosp:1;
+        u64 i_rsvd:51;
+    } ii_itte2_fld_s;
+} ii_itte2_u_t;
+
+typedef union ii_itte3_u {
+    u64 ii_itte3_regval;
+    struct {
+        u64 i_offset:5;
+        u64 i_rsvd_1:3;
+        u64 i_w_num:4;
+        u64 i_iosp:1;
+        u64 i_rsvd:51;
+    } ii_itte3_fld_s;
+} ii_itte3_u_t;
+
+typedef union ii_itte4_u {
+    u64 ii_itte4_regval;
+    struct {
+        u64 i_offset:5;
+        u64 i_rsvd_1:3;
+        u64 i_w_num:4;
+        u64 i_iosp:1;
+        u64 i_rsvd:51;
+    } ii_itte4_fld_s;
+} ii_itte4_u_t;
+
+typedef union ii_itte5_u {
+    u64 ii_itte5_regval;
+    struct {
+        u64 i_offset:5;
+        u64 i_rsvd_1:3;
+        u64 i_w_num:4;
+        u64 i_iosp:1;
+        u64 i_rsvd:51;
+    } ii_itte5_fld_s;
+} ii_itte5_u_t;
+
+typedef union ii_itte6_u {
+    u64 ii_itte6_regval;
+    struct {
+        u64 i_offset:5;
+        u64 i_rsvd_1:3;
+        u64 i_w_num:4;
+        u64 i_iosp:1;
+        u64 i_rsvd:51;
+    } ii_itte6_fld_s;
+} ii_itte6_u_t;
+
+typedef union ii_itte7_u {
+    u64 ii_itte7_regval;
+    struct {
+        u64 i_offset:5;
+        u64 i_rsvd_1:3;
+        u64 i_w_num:4;
+        u64 i_iosp:1;
+        u64 i_rsvd:51;
+    } ii_itte7_fld_s;
+} ii_itte7_u_t;
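
As an illustration of the Big Window translation described above, a minimal sketch (not code from this patch; the function name is invented, and only the M-mode packing from the comment is shown) of composing an ITTE value for a given target widget and 5-bit offset:

/* Sketch: compose an M-mode ITTE value.  OFFSET supplies Crosstalk[33:29]
 * and W_NUM selects the target widget; IOSP is left clear here. */
static u64 itte_mmode_encode(unsigned int widget, unsigned int offset)
{
    ii_itte1_u_t itte;   /* ITTE1..ITTE7 share this layout */

    itte.ii_itte1_regval = 0;
    itte.ii_itte1_fld_s.i_offset = offset & 0x1f;
    itte.ii_itte1_fld_s.i_w_num  = widget & 0xf;
    itte.ii_itte1_fld_s.i_iosp   = 0;
    return itte.ii_itte1_regval;
}
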
+
+/*
+ * Description: There are 9 instances of this register, one per actual
+ * widget in this implementation of SHub and Crossbow.
+ * Note: Crossbow only has ports for Widgets 8 through F; widget 0
+ * refers to Crossbow's internal space.
+ * This register contains the state elements per widget that are
+ * necessary to manage the PIO flow control on Crosstalk and on the
+ * Router Network. See the PIO Flow Control chapter for a complete
+ * description of this register.
+ * The SPUR_WR bit requires some explanation. When this register is
+ * written, the new value of the C field is captured in an internal
+ * register so the hardware can remember what the programmer wrote into
+ * the credit counter. The SPUR_WR bit sets whenever the C field
+ * increments above this stored value, which indicates that there have
+ * been more responses received than requests sent. The SPUR_WR bit
+ * cannot be cleared until a value is written to the IPRBx register;
+ * the write will correct the C field and capture its new value in the
+ * internal register. Even if IECLR[E_PRB_x] is set, the SPUR_WR bit
+ * will persist if IPRBx hasn't yet been written.
+ */
+
+typedef union ii_iprb0_u {
+    u64 ii_iprb0_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprb0_fld_s;
+} ii_iprb0_u_t;
+
+typedef union ii_iprb8_u {
+    u64 ii_iprb8_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprb8_fld_s;
+} ii_iprb8_u_t;
+
+typedef union ii_iprb9_u {
+    u64 ii_iprb9_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprb9_fld_s;
+} ii_iprb9_u_t;
+
+typedef union ii_iprba_u {
+    u64 ii_iprba_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprba_fld_s;
+} ii_iprba_u_t;
+
+typedef union ii_iprbb_u {
+    u64 ii_iprbb_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprbb_fld_s;
+} ii_iprbb_u_t;
+
+typedef union ii_iprbc_u {
+    u64 ii_iprbc_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprbc_fld_s;
+} ii_iprbc_u_t;
+
+typedef union ii_iprbd_u {
+    u64 ii_iprbd_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprbd_fld_s;
+} ii_iprbd_u_t;
+
+typedef union ii_iprbe_u {
+    u64 ii_iprbe_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprbe_fld_s;
+} ii_iprbe_u_t;
+
+typedef union ii_iprbf_u {
+    u64 ii_iprbf_regval;
+    struct {
+        u64 i_c:8;
+        u64 i_na:14;
+        u64 i_rsvd_2:2;
+        u64 i_nb:14;
+        u64 i_rsvd_1:2;
+        u64 i_m:2;
+        u64 i_f:1;
+        u64 i_of_cnt:5;
+        u64 i_error:1;
+        u64 i_rd_to:1;
+        u64 i_spur_wr:1;
+        u64 i_spur_rd:1;
+        u64 i_rsvd:11;
+        u64 i_mult_err:1;
+    } ii_iprbe_fld_s;
+} ii_iprbf_u_t;
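
The SPUR_WR recovery sequence described in the PRB comment (rewrite IPRBx so the C field is recaptured) might look roughly like the following sketch. It is not part of the patch: iprb_read()/iprb_write() are placeholder accessors standing in for whatever MMR access routine the caller uses, and all IPRBx registers are assumed to share the IPRB0 layout, as the header suggests.

extern u64 iprb_read(int widget);             /* placeholder accessor */
extern void iprb_write(int widget, u64 val);  /* placeholder accessor */

/* Sketch: clear a spurious-write indication on one PRB by rewriting the
 * register with a corrected credit count (C field). */
static void prb_clear_spur_wr(int widget, u64 correct_credits)
{
    ii_iprb0_u_t prb;

    prb.ii_iprb0_regval = iprb_read(widget);
    if (!prb.ii_iprb0_fld_s.i_spur_wr)
        return;
    prb.ii_iprb0_fld_s.i_c = correct_credits & 0xff;
    iprb_write(widget, prb.ii_iprb0_regval);  /* write recaptures C */
}
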
+
+/*
+ * This register specifies the timeout value to use for monitoring
+ * Crosstalk credits which are used outbound to Crosstalk. An internal
+ * counter called the Crosstalk Credit Timeout Counter increments every
+ * 128 II clocks. The counter starts counting anytime the credit count
+ * drops below a threshold, and resets to zero (stops counting) anytime
+ * the credit count is at or above the threshold. The threshold is 1
+ * credit in direct connect mode and 2 in Crossbow connect mode. When
+ * the internal Crosstalk Credit Timeout Counter reaches the value
+ * programmed in this register, a Crosstalk Credit Timeout has occurred.
+ * The internal counter is not readable from software, and stops
+ * counting at its maximum value, so it cannot cause more than one
+ * interrupt.
+ */
+
+typedef union ii_ixcc_u {
+    u64 ii_ixcc_regval;
+    struct {
+        u64 i_time_out:26;
+        u64 i_rsvd:38;
+    } ii_ixcc_fld_s;
+} ii_ixcc_u_t;
+
+/*
+ * Description: This register qualifies all the PIO and DMA operations
+ * launched from widget 0 towards the SHub. In addition, it also
+ * qualifies accesses by the BTE streams. The bits in each field of
+ * this register are cleared by the SHub upon detection of an error
+ * which requires widget 0 or the BTE streams to be terminated. Whether
+ * or not widget x has access rights to this SHub is determined by an
+ * AND of the device enable bit in the appropriate field of this
+ * register and bit 0 in the Wx_IAC field. The bits in this field are
+ * set by writing a 1 to them. Incoming replies from Crosstalk are not
+ * subject to this access control mechanism.
+ */
+
+typedef union ii_imem_u {
+    u64 ii_imem_regval;
+    struct {
+        u64 i_w0_esd:1;
+        u64 i_rsvd_3:3;
+        u64 i_b0_esd:1;
+        u64 i_rsvd_2:3;
+        u64 i_b1_esd:1;
+        u64 i_rsvd_1:3;
+        u64 i_clr_precise:1;
+        u64 i_rsvd:51;
+    } ii_imem_fld_s;
+} ii_imem_u_t;
+
+/*
+ * Description: This register specifies the timeout value to use for
+ * monitoring Crosstalk tail flits coming into the Shub in the TAIL_TO
+ * field. An internal counter associated with this register is
+ * incremented every 128 II internal clocks (7 bits). The counter
+ * starts counting anytime a header micropacket is received and stops
+ * counting (and resets to zero) any time a micropacket with a Tail bit
+ * is received. Once the counter reaches the threshold value programmed
+ * in this register, it generates an interrupt to the processor that is
+ * programmed into the IIDSR. The counter saturates (does not roll
+ * over) at its maximum value, so it cannot cause another interrupt
+ * until after it is cleared.
+ * The register also contains the Read Response Timeout values. The
+ * Prescalar is 23 bits, and counts II clocks. An internal counter
+ * increments on every II clock and when it reaches the value in the
+ * Prescalar field, all IPRTE registers with their valid bits set have
+ * their Read Response timers bumped. Whenever any of them match the
+ * value in the RRSP_TO field, a Read Response Timeout has occurred,
+ * and error handling occurs as described in the Error Handling section
+ * of this document.
+ */
+
+typedef union ii_ixtt_u {
+    u64 ii_ixtt_regval;
+    struct {
+        u64 i_tail_to:26;
+        u64 i_rsvd_1:6;
+        u64 i_rrsp_ps:23;
+        u64 i_rrsp_to:5;
+        u64 i_rsvd:4;
+    } ii_ixtt_fld_s;
+} ii_ixtt_u_t;
+
+/*
+ * Writing a 1 to the fields of this register clears the appropriate
+ * error bits in other areas of SHub. Note that when the E_PRB_x bits
+ * are used to clear error bits in PRB registers, SPUR_RD and SPUR_WR
+ * may persist, because they require additional action to clear them.
+ * See the IPRBx and IXSS Register specifications.
+ */
+
+typedef union ii_ieclr_u {
+    u64 ii_ieclr_regval;
+    struct {
+        u64 i_e_prb_0:1;
+        u64 i_rsvd:7;
+        u64 i_e_prb_8:1;
+        u64 i_e_prb_9:1;
+        u64 i_e_prb_a:1;
+        u64 i_e_prb_b:1;
+        u64 i_e_prb_c:1;
+        u64 i_e_prb_d:1;
+        u64 i_e_prb_e:1;
+        u64 i_e_prb_f:1;
+        u64 i_e_crazy:1;
+        u64 i_e_bte_0:1;
+        u64 i_e_bte_1:1;
+        u64 i_reserved_1:10;
+        u64 i_spur_rd_hdr:1;
+        u64 i_cam_intr_to:1;
+        u64 i_cam_overflow:1;
+        u64 i_cam_read_miss:1;
+        u64 i_ioq_rep_underflow:1;
+        u64 i_ioq_req_underflow:1;
+        u64 i_ioq_rep_overflow:1;
+        u64 i_ioq_req_overflow:1;
+        u64 i_iiq_rep_overflow:1;
+        u64 i_iiq_req_overflow:1;
+        u64 i_ii_xn_rep_cred_overflow:1;
+        u64 i_ii_xn_req_cred_overflow:1;
+        u64 i_ii_xn_invalid_cmd:1;
+        u64 i_xn_ii_invalid_cmd:1;
+        u64 i_reserved_2:21;
+    } ii_ieclr_fld_s;
+} ii_ieclr_u_t;
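
Since IECLR is write-1-to-clear, error recovery code typically just composes a mask of the bits it wants to acknowledge. A minimal sketch (not from this patch; the function name and the particular choice of bits are arbitrary examples):

/* Sketch: build an IECLR value that clears both BTE error bits and the
 * PRB error bit for widget 0xd.  SPUR_RD/SPUR_WR still need the IXSS and
 * IPRBx follow-up described above. */
static u64 ieclr_bte_and_prbd_mask(void)
{
    ii_ieclr_u_t ieclr;

    ieclr.ii_ieclr_regval = 0;
    ieclr.ii_ieclr_fld_s.i_e_bte_0 = 1;
    ieclr.ii_ieclr_fld_s.i_e_bte_1 = 1;
    ieclr.ii_ieclr_fld_s.i_e_prb_d = 1;
    return ieclr.ii_ieclr_regval;
}
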
+
+/*
+ * This register controls both BTEs. SOFT_RESET is intended for
+ * recovery after an error. COUNT controls the total number of CRBs
+ * that both BTEs (combined) can use, which affects total BTE
+ * bandwidth.
+ */
+
+typedef union ii_ibcr_u {
+    u64 ii_ibcr_regval;
+    struct {
+        u64 i_count:4;
+        u64 i_rsvd_1:4;
+        u64 i_soft_reset:1;
+        u64 i_rsvd:55;
+    } ii_ibcr_fld_s;
+} ii_ibcr_u_t;
+
+/*
+ * This register contains the header of a spurious read response
+ * received from Crosstalk. A spurious read response is defined as a
+ * read response received by II from a widget for which (1) the SIDN
+ * has a value between 1 and 7, inclusive (II never sends requests to
+ * these widgets), (2) there is no valid IPRTE register which
+ * corresponds to the TNUM, or (3) the widget indicated in SIDN is not
+ * the same as the widget recorded in the IPRTE register referenced by
+ * the TNUM. If this condition is true, and if the IXSS[VALID] bit is
+ * clear, then the header of the spurious read response is captured in
+ * IXSM and IXSS, and IXSS[VALID] is set. The errant header is thereby
+ * captured, and no further spurious read responses are captured until
+ * IXSS[VALID] is cleared by setting the appropriate bit in IECLR.
+ * Every time a spurious read response is detected, the SPUR_RD bit of
+ * the PRB corresponding to the incoming message's SIDN field is set.
+ * This always happens, regardless of whether a header is captured. The
+ * programmer should check IXSM[SIDN] to determine which widget sent
+ * the spurious response, because there may be more than one SPUR_RD
+ * bit set in the PRB registers. The widget indicated by IXSM[SIDN]
+ * sent the first spurious read response received since the last time
+ * IXSS[VALID] was clear; the SPUR_RD bit of the corresponding PRB will
+ * be set. Any SPUR_RD bits in any other PRB registers indicate
+ * spurious messages from other widgets which were detected after the
+ * header was captured.
+ */
+
+typedef union ii_ixsm_u {
+    u64 ii_ixsm_regval;
+    struct {
+        u64 i_byte_en:32;
+        u64 i_reserved:1;
+        u64 i_tag:3;
+        u64 i_alt_pactyp:4;
+        u64 i_bo:1;
+        u64 i_error:1;
+        u64 i_vbpm:1;
+        u64 i_gbr:1;
+        u64 i_ds:2;
+        u64 i_ct:1;
+        u64 i_tnum:5;
+        u64 i_pactyp:4;
+        u64 i_sidn:4;
+        u64 i_didn:4;
+    } ii_ixsm_fld_s;
+} ii_ixsm_u_t;
+
+/*
+ * This register contains the sideband bits of a spurious read response
+ * received from Crosstalk.
+ */
+
+typedef union ii_ixss_u {
+    u64 ii_ixss_regval;
+    struct {
+        u64 i_sideband:8;
+        u64 i_rsvd:55;
+        u64 i_valid:1;
+    } ii_ixss_fld_s;
+} ii_ixss_u_t;
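
The capture protocol above (a header is latched only while IXSS[VALID] is clear) suggests a check along the following lines. This is a sketch only, not part of the patch: the raw IXSM/IXSS values are assumed to have been read by the caller, and printk() is the only kernel facility used.

/* Sketch: report a captured spurious read response, if any. */
static int spurious_read_captured(u64 ixsm_raw, u64 ixss_raw)
{
    ii_ixsm_u_t ixsm;
    ii_ixss_u_t ixss;

    ixsm.ii_ixsm_regval = ixsm_raw;
    ixss.ii_ixss_regval = ixss_raw;
    if (!ixss.ii_ixss_fld_s.i_valid)
        return 0;
    printk("spurious read response from widget 0x%x, sideband 0x%x\n",
           (unsigned int)ixsm.ii_ixsm_fld_s.i_sidn,
           (unsigned int)ixss.ii_ixss_fld_s.i_sideband);
    return 1;
}
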
+
+/*
+ * This register enables software to access the II LLP's test port.
+ * Refer to the LLP 2.5 documentation for an explanation of the test
+ * port. Software can write to this register to program the values for
+ * the control fields (TestErrCapture, TestClear, TestFlit, TestMask
+ * and TestSeed). Similarly, software can read from this register to
+ * obtain the values of the test port's status outputs (TestCBerr,
+ * TestValid and TestData).
+ */
+
+typedef union ii_ilct_u {
+    u64 ii_ilct_regval;
+    struct {
+        u64 i_test_seed:20;
+        u64 i_test_mask:8;
+        u64 i_test_data:20;
+        u64 i_test_valid:1;
+        u64 i_test_cberr:1;
+        u64 i_test_flit:3;
+        u64 i_test_clear:1;
+        u64 i_test_err_capture:1;
+        u64 i_rsvd:9;
+    } ii_ilct_fld_s;
+} ii_ilct_u_t;
+
+/*
+ * If the II detects an illegal incoming Duplonet packet (request or
+ * reply) when VALID==0 in the IIEPH1 register, then it saves the
+ * contents of the packet's header flit in the IIEPH1 and IIEPH2
+ * registers, sets the VALID bit in IIEPH1, clears the OVERRUN bit, and
+ * assigns a value to the ERR_TYPE field which indicates the specific
+ * nature of the error. The II recognizes four different types of
+ * errors: short request packets (ERR_TYPE==2), short reply packets
+ * (ERR_TYPE==3), long request packets (ERR_TYPE==4) and long reply
+ * packets (ERR_TYPE==5). The encodings for these types of errors were
+ * chosen to be consistent with the same types of errors indicated by
+ * the ERR_TYPE field in the LB_ERROR_HDR1 register (in the LB unit).
+ * If the II detects an illegal incoming Duplonet packet when VALID==1
+ * in the IIEPH1 register, then it merely sets the OVERRUN bit to
+ * indicate that a subsequent error has happened, and does nothing
+ * further.
+ */
+
+typedef union ii_iieph1_u {
+    u64 ii_iieph1_regval;
+    struct {
+        u64 i_command:7;
+        u64 i_rsvd_5:1;
+        u64 i_suppl:14;
+        u64 i_rsvd_4:1;
+        u64 i_source:14;
+        u64 i_rsvd_3:1;
+        u64 i_err_type:4;
+        u64 i_rsvd_2:4;
+        u64 i_overrun:1;
+        u64 i_rsvd_1:3;
+        u64 i_valid:1;
+        u64 i_rsvd:13;
+    } ii_iieph1_fld_s;
+} ii_iieph1_u_t;
+
+/*
+ * This register holds the Address field from the header flit of an
+ * incoming erroneous Duplonet packet, along with the tail bit which
+ * accompanied this header flit. This register is essentially an
+ * extension of IIEPH1. Two registers were necessary because the 64
+ * bits available in only a single register were insufficient to
+ * capture the entire header flit of an erroneous packet.
+ */
+
+typedef union ii_iieph2_u {
+    u64 ii_iieph2_regval;
+    struct {
+        u64 i_rsvd_0:3;
+        u64 i_address:47;
+        u64 i_rsvd_1:10;
+        u64 i_tail:1;
+        u64 i_rsvd:3;
+    } ii_iieph2_fld_s;
+} ii_iieph2_u_t;
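
A captured error header spans both registers, so decoding naturally uses the two unions together. A sketch under the same assumptions as the earlier examples (raw register values supplied by the caller; the ERR_TYPE names are taken from the IIEPH1 comment):

/* Sketch: describe a captured erroneous Duplonet header. */
static void iieph_describe(u64 eph1_raw, u64 eph2_raw)
{
    static const char *const err_name[] = {
        "short request", "short reply", "long request", "long reply"
    };
    ii_iieph1_u_t eph1;
    ii_iieph2_u_t eph2;
    unsigned int t;

    eph1.ii_iieph1_regval = eph1_raw;
    eph2.ii_iieph2_regval = eph2_raw;
    if (!eph1.ii_iieph1_fld_s.i_valid)
        return;
    t = eph1.ii_iieph1_fld_s.i_err_type;
    printk("II bad packet: cmd 0x%x source 0x%x type %s%s addr 0x%llx\n",
           (unsigned int)eph1.ii_iieph1_fld_s.i_command,
           (unsigned int)eph1.ii_iieph1_fld_s.i_source,
           (t >= 2 && t <= 5) ? err_name[t - 2] : "unknown",
           eph1.ii_iieph1_fld_s.i_overrun ? " (overrun)" : "",
           (unsigned long long)eph2.ii_iieph2_fld_s.i_address);
}
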
+
+/******************************/
+
+/*
+ * This register's value is a bit vector that guards access from SXBs
+ * to local registers within the II as well as to external Crosstalk
+ * widgets.
+ */
+
+typedef union ii_islapr_u {
+    u64 ii_islapr_regval;
+    struct {
+        u64 i_region:64;
+    } ii_islapr_fld_s;
+} ii_islapr_u_t;
+
+/*
+ * A write to this register of the 56-bit value "Pup+Bun" will cause
+ * the bit in the ISLAPR register corresponding to the region of the
+ * requestor to be set (access allowed).
+ */
+
+typedef union ii_islapo_u {
+    u64 ii_islapo_regval;
+    struct {
+        u64 i_io_sbx_ovrride:56;
+        u64 i_rsvd:8;
+    } ii_islapo_fld_s;
+} ii_islapo_u_t;
+
+/*
+ * Determines how long the wrapper will wait after an interrupt is
+ * initially issued from the II before it times out the outstanding
+ * interrupt and drops it from the interrupt queue.
+ */
+
+typedef union ii_iwi_u {
+    u64 ii_iwi_regval;
+    struct {
+        u64 i_prescale:24;
+        u64 i_rsvd:8;
+        u64 i_timeout:8;
+        u64 i_rsvd1:8;
+        u64 i_intrpt_retry_period:8;
+        u64 i_rsvd2:8;
+    } ii_iwi_fld_s;
+} ii_iwi_u_t;
+
+/*
+ * Log errors which have occurred in the II wrapper. The errors are
+ * cleared by writing to the IECLR register.
+ */
+
+typedef union ii_iwel_u {
+    u64 ii_iwel_regval;
+    struct {
+        u64 i_intr_timed_out:1;
+        u64 i_rsvd:7;
+        u64 i_cam_overflow:1;
+        u64 i_cam_read_miss:1;
+        u64 i_rsvd1:2;
+        u64 i_ioq_rep_underflow:1;
+        u64 i_ioq_req_underflow:1;
+        u64 i_ioq_rep_overflow:1;
+        u64 i_ioq_req_overflow:1;
+        u64 i_iiq_rep_overflow:1;
+        u64 i_iiq_req_overflow:1;
+        u64 i_rsvd2:6;
+        u64 i_ii_xn_rep_cred_over_under:1;
+        u64 i_ii_xn_req_cred_over_under:1;
+        u64 i_rsvd3:6;
+        u64 i_ii_xn_invalid_cmd:1;
+        u64 i_xn_ii_invalid_cmd:1;
+        u64 i_rsvd4:30;
+    } ii_iwel_fld_s;
+} ii_iwel_u_t;
+
+/*
+ * Controls the II wrapper.
+ */
+
+typedef union ii_iwc_u {
+    u64 ii_iwc_regval;
+    struct {
+        u64 i_dma_byte_swap:1;
+        u64 i_rsvd:3;
+        u64 i_cam_read_lines_reset:1;
+        u64 i_rsvd1:3;
+        u64 i_ii_xn_cred_over_under_log:1;
+        u64 i_rsvd2:19;
+        u64 i_xn_rep_iq_depth:5;
+        u64 i_rsvd3:3;
+        u64 i_xn_req_iq_depth:5;
+        u64 i_rsvd4:3;
+        u64 i_iiq_depth:6;
+        u64 i_rsvd5:12;
+        u64 i_force_rep_cred:1;
+        u64 i_force_req_cred:1;
+    } ii_iwc_fld_s;
+} ii_iwc_u_t;
+
+/*
+ * Status in the II wrapper.
+ */
+
+typedef union ii_iws_u {
+    u64 ii_iws_regval;
+    struct {
+        u64 i_xn_rep_iq_credits:5;
+        u64 i_rsvd:3;
+        u64 i_xn_req_iq_credits:5;
+        u64 i_rsvd1:51;
+    } ii_iws_fld_s;
+} ii_iws_u_t;
+
+/*
+ * Masks errors in the IWEL register.
+ */
+
+typedef union ii_iweim_u {
+    u64 ii_iweim_regval;
+    struct {
+        u64 i_intr_timed_out:1;
+        u64 i_rsvd:7;
+        u64 i_cam_overflow:1;
+        u64 i_cam_read_miss:1;
+        u64 i_rsvd1:2;
+        u64 i_ioq_rep_underflow:1;
+        u64 i_ioq_req_underflow:1;
+        u64 i_ioq_rep_overflow:1;
+        u64 i_ioq_req_overflow:1;
+        u64 i_iiq_rep_overflow:1;
+        u64 i_iiq_req_overflow:1;
+        u64 i_rsvd2:6;
+        u64 i_ii_xn_rep_cred_overflow:1;
+        u64 i_ii_xn_req_cred_overflow:1;
+        u64 i_rsvd3:6;
+        u64 i_ii_xn_invalid_cmd:1;
+        u64 i_xn_ii_invalid_cmd:1;
+        u64 i_rsvd4:30;
+    } ii_iweim_fld_s;
+} ii_iweim_u_t;
+
+/*
+ * A write to this register causes a particular field in the
+ * corresponding widget's PRB entry to be adjusted up or down by 1.
+ * This counter should be used when recovering from error and reset
+ * conditions. Note that software would be capable of causing
+ * inadvertent overflow or underflow of these counters.
+ */
+
+typedef union ii_ipca_u {
+    u64 ii_ipca_regval;
+    struct {
+        u64 i_wid:4;
+        u64 i_adjust:1;
+        u64 i_rsvd_1:3;
+        u64 i_field:2;
+        u64 i_rsvd:54;
+    } ii_ipca_fld_s;
+} ii_ipca_u_t;
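
Composing an IPCA write follows the same union-packing pattern. A sketch only, not part of the patch: the encodings of the FIELD and ADJUST values are not spelled out in this excerpt, so the parameters below are passed through unchanged rather than given symbolic names.

/* Sketch: compose an IPCA value that adjusts one field of the PRB entry
 * for 'widget' by 1 in the direction selected by 'adjust'. */
static u64 ipca_adjust_value(unsigned int widget, unsigned int field,
                             unsigned int adjust)
{
    ii_ipca_u_t ipca;

    ipca.ii_ipca_regval = 0;
    ipca.ii_ipca_fld_s.i_wid    = widget & 0xf;
    ipca.ii_ipca_fld_s.i_field  = field & 0x3;
    ipca.ii_ipca_fld_s.i_adjust = adjust & 0x1;
    return ipca.ii_ipca_regval;
}
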
+
+/*
+ * There are 8 instances of this register. This register contains the
+ * information that the II has to remember once it has launched a PIO
+ * Read operation. The contents are used to form the correct Router
+ * Network packet and direct the Crosstalk reply to the appropriate
+ * processor.
+ */
+
+typedef union ii_iprte0a_u {
+    u64 ii_iprte0a_regval;
+    struct {
+        u64 i_rsvd_1:54;
+        u64 i_widget:4;
+        u64 i_to_cnt:5;
+        u64 i_vld:1;
+    } ii_iprte0a_fld_s;
+} ii_iprte0a_u_t;
+
+typedef union ii_iprte1a_u {
+    u64 ii_iprte1a_regval;
+    struct {

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog