[Xen-changelog] Merged.
# HG changeset patch # User emellor@xxxxxxxxxxxxxxxxxxxxxx # Node ID ece9b5710b291e82b52064308dcbe3135fd90633 # Parent 09967f2d6e3b818c3eb79bbe006f95e675acf711 # Parent 8f7aad20b4a5ba33762db56bb7e5cb94fe24395e Merged. diff -r 09967f2d6e3b -r ece9b5710b29 linux-2.6-xen-sparse/drivers/xen/netback/netback.c --- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Wed Apr 5 23:59:06 2006 +++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c Wed Apr 5 23:59:18 2006 @@ -329,9 +329,9 @@ irq = netif->irq; id = RING_GET_REQUEST(&netif->rx, netif->rx.rsp_prod_pvt)->id; flags = 0; - if (skb->ip_summed == CHECKSUM_HW) - flags |= NETRXF_csum_blank; - if (skb->proto_data_valid) + if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ + flags |= NETRXF_csum_blank | NETRXF_data_validated; + else if (skb->proto_data_valid) /* remote but checksummed? */ flags |= NETRXF_data_validated; if (make_rx_response(netif, id, status, (unsigned long)skb->data & ~PAGE_MASK, @@ -658,7 +658,11 @@ skb->dev = netif->dev; skb->protocol = eth_type_trans(skb, skb->dev); - if (txreq.flags & NETTXF_data_validated) { + /* + * Old frontends do not assert data_validated but we + * can infer it from csum_blank so test both flags. + */ + if (txreq.flags & (NETTXF_data_validated|NETTXF_csum_blank)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->proto_data_valid = 1; } else { diff -r 09967f2d6e3b -r ece9b5710b29 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Wed Apr 5 23:59:06 2006 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c Wed Apr 5 23:59:18 2006 @@ -698,9 +698,9 @@ tx->size = skb->len; tx->flags = 0; - if (skb->ip_summed == CHECKSUM_HW) - tx->flags |= NETTXF_csum_blank; - if (skb->proto_data_valid) + if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ + tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; + if (skb->proto_data_valid) /* remote but checksummed? */ tx->flags |= NETTXF_data_validated; np->tx.req_prod_pvt = i + 1; @@ -816,7 +816,11 @@ skb->len = rx->status; skb->tail = skb->data + skb->len; - if (rx->flags & NETRXF_data_validated) { + /* + * Old backends do not assert data_validated but we + * can infer it from csum_blank so test both flags. + */ + if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank)) { skb->ip_summed = CHECKSUM_UNNECESSARY; skb->proto_data_valid = 1; } else { @@ -1017,8 +1021,11 @@ tx->gref = np->grant_tx_ref[i]; tx->offset = (unsigned long)skb->data & ~PAGE_MASK; tx->size = skb->len; - tx->flags = (skb->ip_summed == CHECKSUM_HW) ? - NETTXF_csum_blank : 0; + tx->flags = 0; + if (skb->ip_summed == CHECKSUM_HW) /* local packet? */ + tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; + if (skb->proto_data_valid) /* remote but checksummed? 
*/ + tx->flags |= NETTXF_data_validated; np->stats.tx_bytes += skb->len; np->stats.tx_packets++; diff -r 09967f2d6e3b -r ece9b5710b29 linux-2.6-xen-sparse/drivers/xen/pciback/conf_space.c --- a/linux-2.6-xen-sparse/drivers/xen/pciback/conf_space.c Wed Apr 5 23:59:06 2006 +++ b/linux-2.6-xen-sparse/drivers/xen/pciback/conf_space.c Wed Apr 5 23:59:18 2006 @@ -106,7 +106,7 @@ } static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask, - u32 offset) + int offset) { if (offset >= 0) { new_val_mask <<= (offset * 8); @@ -180,7 +180,8 @@ if ((req_start >= field_start && req_start < field_end) || (req_end > field_start && req_end <= field_end)) { - err = conf_space_read(dev, cfg_entry, offset, &tmp_val); + err = conf_space_read(dev, cfg_entry, field_start, + &tmp_val); if (err) goto out; @@ -228,14 +229,16 @@ || (req_end > field_start && req_end <= field_end)) { tmp_val = 0; - err = pciback_config_read(dev, offset, size, &tmp_val); + err = pciback_config_read(dev, field_start, + field->size, &tmp_val); if (err) break; tmp_val = merge_value(tmp_val, value, get_mask(size), - field_start - req_start); - - err = conf_space_write(dev, cfg_entry, offset, tmp_val); + req_start - field_start); + + err = conf_space_write(dev, cfg_entry, field_start, + tmp_val); handled = 1; } } diff -r 09967f2d6e3b -r ece9b5710b29 tools/Rules.mk --- a/tools/Rules.mk Wed Apr 5 23:59:06 2006 +++ b/tools/Rules.mk Wed Apr 5 23:59:18 2006 @@ -11,6 +11,8 @@ XEN_LIBXENSTAT = $(XEN_ROOT)/tools/xenstat/libxenstat/src X11_LDPATH = -L/usr/X11R6/$(LIBDIR) + +CFLAGS += -D__XEN_INTERFACE_VERSION__=0x00030101 %.opic: %.c $(CC) $(CPPFLAGS) -DPIC $(CFLAGS) -fPIC -c -o $@ $< diff -r 09967f2d6e3b -r ece9b5710b29 tools/debugger/gdb/README --- a/tools/debugger/gdb/README Wed Apr 5 23:59:06 2006 +++ b/tools/debugger/gdb/README Wed Apr 5 23:59:18 2006 @@ -1,16 +1,17 @@ -DomU GDB server for 32-bit (PAE and non-PAE) systems +DomU & HVM GDB server for 32-bit (PAE and non-PAE) and x86_64 systems ---------------------------------------------------- Lines marked below with [*] are optional, if you want full source-level debugging of your kernel image. To build the GDB server: + 0. Build rest of the Xen first from the base directory 1. Run ./gdbbuild from within this directory. 2. Copy ./gdb-6.2.1-linux-i386-xen/gdb/gdbserver/gdbserver-xen to your test machine. -To build a debuggable guest kernel image: +To build a debuggable guest domU kernel image: 1. cd linux-2.6.xx-xenU 2. make menuconfig 3. From within the configurator, enable the following options: @@ -28,7 +29,7 @@ # bt # disass -To debug a crashed guest: +To debug a crashed domU guest: 1. Add '(enable-dump yes)' to /etc/xen/xend-config.sxp before starting xend. 2. When the domain crashes, a core file is written to diff -r 09967f2d6e3b -r ece9b5710b29 xen/Makefile --- a/xen/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/Makefile Wed Apr 5 23:59:18 2006 @@ -1,27 +1,20 @@ -INSTALL = install -INSTALL_DATA = $(INSTALL) -m0644 -INSTALL_DIR = $(INSTALL) -d -m0755 - # This is the correct place to edit the build version. # All other places this is stored (eg. compile.h) should be autogenerated. 
-export XEN_VERSION = 3 -export XEN_SUBVERSION = 0 -export XEN_EXTRAVERSION = -unstable -export XEN_FULLVERSION = $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION) +export XEN_VERSION := 3 +export XEN_SUBVERSION := 0 +export XEN_EXTRAVERSION := -unstable +export XEN_FULLVERSION := $(XEN_VERSION).$(XEN_SUBVERSION)$(XEN_EXTRAVERSION) -export BASEDIR := $(CURDIR) - -include Rules.mk +export BASEDIR := $(CURDIR) default: build -$(TARGET).gz: $(TARGET) - gzip -f -9 < $< > $@.new - mv $@.new $@ -debug: - objdump -D -S $(TARGET)-syms > $(TARGET).s +ifeq ($(XEN_ROOT),) -dist: install +build install clean: + make -f Rules.mk $@ + +else build: $(TARGET).gz @@ -38,24 +31,35 @@ $(INSTALL_DATA) include/public/io/*.h $(DESTDIR)/usr/include/xen/io $(INSTALL_DATA) include/public/COPYING $(DESTDIR)/usr/include/xen -clean: delete-unfresh-files +clean:: delete-unfresh-files $(MAKE) -C tools clean - $(MAKE) -C common clean - $(MAKE) -C drivers clean - $(MAKE) -C acm clean - $(MAKE) -C arch/$(TARGET_ARCH) clean + $(MAKE) -f $(BASEDIR)/Rules.mk -C common clean + $(MAKE) -f $(BASEDIR)/Rules.mk -C drivers clean + $(MAKE) -f $(BASEDIR)/Rules.mk -C acm clean + $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) clean rm -f include/asm *.o $(TARGET)* *~ core rm -f include/asm-*/asm-offsets.h rm -f include/xen/acm_policy.h +endif + +dist: install + +debug: FORCE + objdump -D -S $(TARGET)-syms > $(TARGET).s + +$(TARGET).gz: $(TARGET) + gzip -f -9 < $< > $@.new + mv $@.new $@ + $(TARGET): delete-unfresh-files $(MAKE) -C tools - $(MAKE) include/xen/compile.h - $(MAKE) include/xen/acm_policy.h + $(MAKE) -f $(BASEDIR)/Rules.mk include/xen/compile.h + $(MAKE) -f $(BASEDIR)/Rules.mk include/xen/acm_policy.h [ -e include/asm ] || ln -sf asm-$(TARGET_ARCH) include/asm - $(MAKE) -C arch/$(TARGET_ARCH) asm-offsets.s - $(MAKE) include/asm-$(TARGET_ARCH)/asm-offsets.h - $(MAKE) -C arch/$(TARGET_ARCH) $(TARGET) + $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) asm-offsets.s + $(MAKE) -f $(BASEDIR)/Rules.mk include/asm-$(TARGET_ARCH)/asm-offsets.h + $(MAKE) -f $(BASEDIR)/Rules.mk -C arch/$(TARGET_ARCH) $(TARGET) # drivers/char/console.o contains static banner/compile info. Blow it away. # Don't refresh these files during e.g., 'sudo make install' @@ -115,7 +119,7 @@ echo ""; \ echo "#endif") <$< >$@ -.PHONY: default debug install dist clean delete-unfresh-files TAGS tags +.PHONY: default debug build install dist clean delete-unfresh-files TAGS tags SUBDIRS = acm arch/$(TARGET_ARCH) common drivers define all_sources diff -r 09967f2d6e3b -r ece9b5710b29 xen/Rules.mk --- a/xen/Rules.mk Wed Apr 5 23:59:06 2006 +++ b/xen/Rules.mk Wed Apr 5 23:59:18 2006 @@ -26,17 +26,23 @@ override COMPILE_ARCH := $(patsubst x86%,x86,$(XEN_COMPILE_ARCH)) override TARGET_ARCH := $(patsubst x86%,x86,$(XEN_TARGET_ARCH)) -TARGET := $(BASEDIR)/xen -HDRS := $(wildcard $(BASEDIR)/include/xen/*.h) -HDRS += $(wildcard $(BASEDIR)/include/public/*.h) -HDRS += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/*.h) -HDRS += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/$(TARGET_SUBARCH)/*.h) -# Do not depend on auto-generated header files. 
-HDRS := $(subst $(BASEDIR)/include/asm-$(TARGET_ARCH)/asm-offsets.h,,$(HDRS)) -HDRS := $(subst $(BASEDIR)/include/xen/banner.h,,$(HDRS)) -HDRS := $(subst $(BASEDIR)/include/xen/compile.h,,$(HDRS)) +TARGET := $(BASEDIR)/xen + +HDRS := $(wildcard $(BASEDIR)/include/xen/*.h) +HDRS += $(wildcard $(BASEDIR)/include/public/*.h) +HDRS += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/*.h) +HDRS += $(wildcard $(BASEDIR)/include/asm-$(TARGET_ARCH)/$(TARGET_SUBARCH)/*.h) + +INSTALL := install +INSTALL_DATA := $(INSTALL) -m0644 +INSTALL_DIR := $(INSTALL) -d -m0755 include $(BASEDIR)/arch/$(TARGET_ARCH)/Rules.mk + +# Do not depend on auto-generated header files. +HDRS := $(subst $(BASEDIR)/include/asm-$(TARGET_ARCH)/asm-offsets.h,,$(HDRS)) +HDRS := $(subst $(BASEDIR)/include/xen/banner.h,,$(HDRS)) +HDRS := $(subst $(BASEDIR)/include/xen/compile.h,,$(HDRS)) # Note that link order matters! ALL_OBJS-y += $(BASEDIR)/common/built_in.o @@ -61,6 +67,36 @@ CFLAGS := $(strip $(CFLAGS) $(CFLAGS-y)) AFLAGS := $(strip $(AFLAGS) $(AFLAGS-y)) +include Makefile + +# Ensure each subdirectory has exactly one trailing slash. +subdir-n := $(patsubst %,%/,$(patsubst %/,%,$(subdir-n))) +subdir-y := $(patsubst %,%/,$(patsubst %/,%,$(subdir-y))) + +# Add explicitly declared subdirectories to the object list. +obj-y += $(patsubst %,%/built_in.o,$(subdir-y)) + +# Add implicitly declared subdirectories (in the object list) to the +# subdirectory list, and rewrite the object-list entry. +subdir-y += $(filter %/,$(obj-y)) +obj-y := $(patsubst %/,%/built-in.o,$(obj-y)) + +subdir-all := $(subdir-y) $(subdir-n) + +built_in.o: $(obj-y) + $(LD) $(LDFLAGS) -r -o $@ $^ + +.PHONY: FORCE +FORCE: + +%/built_in.o: FORCE + $(MAKE) -f $(BASEDIR)/Rules.mk -C $* built_in.o + +clean:: $(addprefix _clean_, $(subdir-all)) FORCE + rm -f *.o *~ core +_clean_%/: FORCE + $(MAKE) -f $(BASEDIR)/Rules.mk -C $* clean + %.o: %.c $(HDRS) Makefile $(CC) $(CFLAGS) -c $< -o $@ diff -r 09967f2d6e3b -r ece9b5710b29 xen/acm/Makefile --- a/xen/acm/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/acm/Makefile Wed Apr 5 23:59:18 2006 @@ -1,9 +1,5 @@ -include $(BASEDIR)/Rules.mk - obj-y += acm_core.o obj-y += acm_policy.o obj-y += acm_simple_type_enforcement_hooks.o obj-y += acm_chinesewall_hooks.o obj-y += acm_null_hooks.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/ia64/Makefile --- a/xen/arch/ia64/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/ia64/Makefile Wed Apr 5 23:59:18 2006 @@ -1,21 +1,17 @@ -include $(BASEDIR)/Rules.mk - subdir-y += xen subdir-y += vmx subdir-y += linux subdir-y += linux-xen -include $(BASEDIR)/Post.mk - $(TARGET)-syms: linux-xen/head.o $(ALL_OBJS) xen.lds.s $(LD) $(LDFLAGS) -T xen.lds.s -N \ -Map map.out linux-xen/head.o $(ALL_OBJS) -o $@ $(NM) -n $@ | $(BASEDIR)/tools/symbols > $(BASEDIR)/xen-syms.S - $(MAKE) $(BASEDIR)/xen-syms.o + $(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o $(LD) $(LDFLAGS) -T xen.lds.s -N \ -Map map.out linux-xen/head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@ $(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S - $(MAKE) $(BASEDIR)/xen-syms.o + $(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o $(LD) $(LDFLAGS) -T xen.lds.s -N \ -Map map.out linux-xen/head.o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@ rm -f $(BASEDIR)/xen-syms.S $(BASEDIR)/xen-syms.o diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/ia64/linux-xen/Makefile --- a/xen/arch/ia64/linux-xen/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/ia64/linux-xen/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include 
$(BASEDIR)/Rules.mk - obj-y += efi.o obj-y += entry.o obj-y += irq_ia64.o @@ -15,5 +13,3 @@ obj-y += tlb.o obj-y += unaligned.o obj-y += unwind.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/ia64/linux/Makefile --- a/xen/arch/ia64/linux/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/ia64/linux/Makefile Wed Apr 5 23:59:18 2006 @@ -1,6 +1,3 @@ -include $(BASEDIR)/Rules.mk - - obj-y += bitop.o obj-y += clear_page.o obj-y += cmdline.o @@ -25,8 +22,6 @@ obj-y += __udivdi3.o obj-y += __moddi3.o obj-y += __umoddi3.o - -include $(BASEDIR)/Post.mk ## variants of divide/modulo ## see files in xen/arch/ia64/linux/lib (linux/arch/ia64/lib) diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/ia64/vmx/Makefile --- a/xen/arch/ia64/vmx/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/ia64/vmx/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - obj-y += hvm_vioapic.o obj-y += mm.o obj-y += mmio.o @@ -20,5 +18,3 @@ obj-y += vmx_virt.o obj-y += vmx_vsa.o obj-y += vtlb.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/ia64/xen/Makefile --- a/xen/arch/ia64/xen/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/ia64/xen/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - obj-y += acpi.o obj-y += dom0_ops.o obj-y += domain.o @@ -26,5 +24,3 @@ obj-y += xentime.o obj-$(crash_debug) += gdbstub.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/ia64/xen/xentime.c --- a/xen/arch/ia64/xen/xentime.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/ia64/xen/xentime.c Wed Apr 5 23:59:18 2006 @@ -84,7 +84,13 @@ return now; } -void update_dom_time(struct vcpu *v) +void update_vcpu_system_time(struct vcpu *v) +{ + /* N-op here, and let dom0 to manage system time directly */ + return; +} + +void update_domain_wallclock_time(struct domain *d) { /* N-op here, and let dom0 to manage system time directly */ return; @@ -268,6 +274,6 @@ void send_timer_event(struct vcpu *v) { - send_guest_virq(v, VIRQ_TIMER); -} - + send_guest_vcpu_virq(v, VIRQ_TIMER); +} + diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/Makefile --- a/xen/arch/x86/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - subdir-y += acpi subdir-y += cpu subdir-y += genapic @@ -30,6 +28,7 @@ obj-y += physdev.o obj-y += rwlock.o obj-y += setup.o +obj-y += shutdown.o obj-y += smp.o obj-y += smpboot.o obj-y += string.o @@ -49,8 +48,6 @@ obj-$(crash_debug) += gdbstub.o -include $(BASEDIR)/Post.mk - $(TARGET): $(TARGET)-syms boot/mkelf32 ./boot/mkelf32 $(TARGET)-syms $(TARGET) 0x100000 \ `$(NM) $(TARGET)-syms | sort | tail -n 1 | sed -e 's/^\([^ ]*\).*/0x\1/'` @@ -59,11 +56,11 @@ $(LD) $(LDFLAGS) -T xen.lds -N \ boot/$(TARGET_SUBARCH).o $(ALL_OBJS) -o $@ $(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S - $(MAKE) $(BASEDIR)/xen-syms.o + $(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o $(LD) $(LDFLAGS) -T xen.lds -N \ boot/$(TARGET_SUBARCH).o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@ $(NM) -n $@ | $(BASEDIR)/tools/symbols >$(BASEDIR)/xen-syms.S - $(MAKE) $(BASEDIR)/xen-syms.o + $(MAKE) -f $(BASEDIR)/Rules.mk $(BASEDIR)/xen-syms.o $(LD) $(LDFLAGS) -T xen.lds -N \ boot/$(TARGET_SUBARCH).o $(ALL_OBJS) $(BASEDIR)/xen-syms.o -o $@ rm -f $(BASEDIR)/xen-syms.S $(BASEDIR)/xen-syms.o diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/Rules.mk --- a/xen/arch/x86/Rules.mk Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/Rules.mk Wed Apr 5 23:59:18 2006 @@ -46,6 +46,10 @@ x86_64 
:= y endif +HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/*.h) +HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/svm/*.h) +HDRS += $(wildcard $(BASEDIR)/include/asm-x86/hvm/vmx/*.h) + # Test for at least GCC v3.2.x. gcc-ver = $(shell $(CC) -dumpversion | sed -e 's/^\(.\)\.\(.\)\.\(.\)/\$(1)/') ifeq ($(call gcc-ver,1),1) diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/acpi/Makefile --- a/xen/arch/x86/acpi/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/acpi/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,1 @@ -include $(BASEDIR)/Rules.mk - obj-y += boot.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/apic.c --- a/xen/arch/x86/apic.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/apic.c Wed Apr 5 23:59:18 2006 @@ -657,9 +657,10 @@ * zeroes page to simulate the local APIC and another * one for the IO-APIC. */ - if (!smp_found_config && detect_init_APIC()) + if (!smp_found_config && detect_init_APIC()) { apic_phys = __pa(alloc_xenheap_page()); - else + memset(__va(apic_phys), 0, PAGE_SIZE); + } else apic_phys = mp_lapic_addr; set_fixmap_nocache(FIX_APIC_BASE, apic_phys); @@ -693,6 +694,7 @@ } else { fake_ioapic_page: ioapic_phys = __pa(alloc_xenheap_page()); + memset(__va(ioapic_phys), 0, PAGE_SIZE); } set_fixmap_nocache(idx, ioapic_phys); apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/cpu/Makefile --- a/xen/arch/x86/cpu/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/cpu/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - subdir-y += mcheck subdir-y += mtrr @@ -12,5 +10,3 @@ obj-$(x86_32) += cyrix.o obj-$(x86_32) += rise.o obj-$(x86_32) += transmeta.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/cpu/common.c --- a/xen/arch/x86/cpu/common.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/cpu/common.c Wed Apr 5 23:59:18 2006 @@ -427,6 +427,17 @@ } #ifdef CONFIG_X86_HT +/* cpuid returns the value latched in the HW at reset, not the APIC ID + * register's value. For any box whose BIOS changes APIC IDs, like + * clustered APIC systems, we must use hard_smp_processor_id. + * + * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. + */ +static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) +{ + return hard_smp_processor_id() >> index_msb; +} + void __devinit detect_ht(struct cpuinfo_x86 *c) { u32 eax, ebx, ecx, edx; diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/cpu/mcheck/Makefile --- a/xen/arch/x86/cpu/mcheck/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/cpu/mcheck/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - obj-y += k7.o obj-y += mce.o obj-y += non-fatal.o @@ -7,5 +5,3 @@ obj-y += p5.o obj-y += p6.o obj-y += winchip.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/cpu/mtrr/Makefile --- a/xen/arch/x86/cpu/mtrr/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/cpu/mtrr/Makefile Wed Apr 5 23:59:18 2006 @@ -1,10 +1,6 @@ -include $(BASEDIR)/Rules.mk - obj-y += amd.o obj-y += centaur.o obj-y += cyrix.o obj-y += generic.o obj-y += main.o obj-y += state.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/domain.c Wed Apr 5 23:59:18 2006 @@ -41,10 +41,6 @@ #include <xen/kernel.h> #include <xen/multicall.h> -/* opt_noreboot: If true, machine will need manual reset on error. 
*/ -static int opt_noreboot = 0; -boolean_param("noreboot", opt_noreboot); - struct percpu_ctxt { struct vcpu *curr_vcpu; unsigned int dirty_segment_mask; @@ -98,84 +94,6 @@ reset_stack_and_jump(idle_loop); } - -static long no_idt[2]; -static int reboot_mode; - -static inline void kb_wait(void) -{ - int i; - - for ( i = 0; i < 0x10000; i++ ) - if ( (inb_p(0x64) & 0x02) == 0 ) - break; -} - -void __attribute__((noreturn)) __machine_halt(void *unused) -{ - for ( ; ; ) - safe_halt(); -} - -void machine_halt(void) -{ - watchdog_disable(); - console_start_sync(); - smp_call_function(__machine_halt, NULL, 1, 0); - __machine_halt(NULL); -} - -void machine_restart(char * __unused) -{ - int i; - - if ( opt_noreboot ) - { - printk("Reboot disabled on cmdline: require manual reset\n"); - machine_halt(); - } - - watchdog_disable(); - console_start_sync(); - - local_irq_enable(); - - /* Ensure we are the boot CPU. */ - if ( GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid ) - { - smp_call_function((void *)machine_restart, NULL, 1, 0); - for ( ; ; ) - safe_halt(); - } - - /* - * Stop all CPUs and turn off local APICs and the IO-APIC, so - * other OSs see a clean IRQ state. - */ - smp_send_stop(); - disable_IO_APIC(); - hvm_disable(); - - /* Rebooting needs to touch the page at absolute address 0. */ - *((unsigned short *)__va(0x472)) = reboot_mode; - - for ( ; ; ) - { - /* Pulse the keyboard reset line. */ - for ( i = 0; i < 100; i++ ) - { - kb_wait(); - udelay(50); - outb(0xfe,0x64); /* pulse reset low */ - udelay(50); - } - - /* That didn't work - force a triple fault.. */ - __asm__ __volatile__("lidt %0": "=m" (no_idt)); - __asm__ __volatile__("int3"); - } -} - void dump_pageframe_info(struct domain *d) { @@ -445,7 +363,7 @@ update_pagetables(v); if ( v->vcpu_id == 0 ) - init_domain_time(d); + update_domain_wallclock_time(d); /* Don't redo final setup */ set_bit(_VCPUF_initialised, &v->vcpu_flags); diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/domain_build.c --- a/xen/arch/x86/domain_build.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/domain_build.c Wed Apr 5 23:59:18 2006 @@ -773,7 +773,7 @@ zap_low_mappings(idle_pg_table_l2); #endif - init_domain_time(d); + update_domain_wallclock_time(d); set_bit(_VCPUF_initialised, &v->vcpu_flags); diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/genapic/Makefile --- a/xen/arch/x86/genapic/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/genapic/Makefile Wed Apr 5 23:59:18 2006 @@ -1,10 +1,7 @@ -include $(BASEDIR)/Rules.mk - obj-y += bigsmp.o obj-y += default.o +obj-y += delivery.o obj-y += es7000.o obj-y += es7000plat.o obj-y += probe.o obj-y += summit.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/genapic/bigsmp.c --- a/xen/arch/x86/genapic/bigsmp.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/genapic/bigsmp.c Wed Apr 5 23:59:18 2006 @@ -1,7 +1,3 @@ -/* - * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs. - * Drives the local APIC in "clustered mode". 
- */ #include <xen/config.h> #include <xen/cpumask.h> #include <asm/current.h> @@ -13,8 +9,6 @@ #include <xen/smp.h> #include <xen/init.h> #include <xen/dmi.h> -#include <asm/mach_ipi.h> -#include <asm/mach-bigsmp/mach_apic.h> #include <asm/mach-default/mach_mpparse.h> static int dmi_bigsmp; /* can be set by dmi scanners */ @@ -52,5 +46,5 @@ struct genapic apic_bigsmp = { APIC_INIT("bigsmp", probe_bigsmp), - .send_ipi_mask = send_IPI_mask_sequence + GENAPIC_PHYS }; diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/genapic/default.c --- a/xen/arch/x86/genapic/default.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/genapic/default.c Wed Apr 5 23:59:18 2006 @@ -12,8 +12,6 @@ #include <xen/string.h> #include <xen/smp.h> #include <xen/init.h> -#include <asm/mach_ipi.h> -#include <asm/mach-default/mach_apic.h> #include <asm/mach-default/mach_mpparse.h> /* should be called last. */ @@ -24,5 +22,5 @@ struct genapic apic_default = { APIC_INIT("default", probe_default), - .send_ipi_mask = send_IPI_mask_bitmask + GENAPIC_FLAT }; diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/genapic/es7000.c --- a/xen/arch/x86/genapic/es7000.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/genapic/es7000.c Wed Apr 5 23:59:18 2006 @@ -13,8 +13,6 @@ #include <xen/string.h> #include <xen/smp.h> #include <xen/init.h> -#include <asm/mach_ipi.h> -#include <asm/mach-es7000/mach_apic.h> #include <asm/mach-es7000/mach_mpparse.h> static __init int probe_es7000(void) @@ -25,5 +23,5 @@ struct genapic apic_es7000 = { APIC_INIT("es7000", probe_es7000), - .send_ipi_mask = send_IPI_mask_sequence + GENAPIC_PHYS }; diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/genapic/summit.c --- a/xen/arch/x86/genapic/summit.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/genapic/summit.c Wed Apr 5 23:59:18 2006 @@ -12,8 +12,6 @@ #include <xen/string.h> #include <xen/smp.h> #include <xen/init.h> -#include <asm/mach_ipi.h> -#include <asm/mach-summit/mach_apic.h> #include <asm/mach-summit/mach_mpparse.h> static __init int probe_summit(void) @@ -24,5 +22,5 @@ struct genapic apic_summit = { APIC_INIT("summit", probe_summit), - .send_ipi_mask = send_IPI_mask_sequence + GENAPIC_PHYS }; diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/Makefile --- a/xen/arch/x86/hvm/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - subdir-y += svm subdir-y += vmx @@ -10,5 +8,3 @@ obj-y += platform.o obj-y += vioapic.o obj-y += vlapic.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/intercept.c --- a/xen/arch/x86/hvm/intercept.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/intercept.c Wed Apr 5 23:59:18 2006 @@ -123,6 +123,16 @@ req->u.data = tmp1; break; + case IOREQ_TYPE_XCHG: + /* + * Note that we don't need to be atomic here since VCPU is accessing + * its own local APIC. 
+ */ + tmp1 = read_handler(v, req->addr, req->size); + write_handler(v, req->addr, req->size, (unsigned long) req->u.data); + req->u.data = tmp1; + break; + default: printk("error ioreq type for local APIC %x\n", req->type); domain_crash_synchronous(); @@ -143,7 +153,7 @@ if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) ) { hvm_mmio_access(v, p, hvm_mmio_handlers[i]->read_handler, - hvm_mmio_handlers[i]->write_handler); + hvm_mmio_handlers[i]->write_handler); return 1; } } diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/io.c --- a/xen/arch/x86/hvm/io.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/io.c Wed Apr 5 23:59:18 2006 @@ -365,44 +365,46 @@ unsigned long old_eax; int sign = p->df ? -1 : 1; - if (p->dir == IOREQ_WRITE) { - if (p->pdata_valid) { + if ( p->pdata_valid || (mmio_opp->flags & OVERLAP) ) + { + if ( mmio_opp->flags & REPZ ) + regs->ecx -= p->count; + if ( p->dir == IOREQ_READ ) + { + regs->edi += sign * p->count * p->size; + if ( mmio_opp->flags & OVERLAP ) + { + unsigned long addr = regs->edi; + if (hvm_realmode(current)) + addr += regs->es << 4; + if (sign > 0) + addr -= p->size; + hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT); + } + } + else /* p->dir == IOREQ_WRITE */ + { + ASSERT(p->dir == IOREQ_WRITE); regs->esi += sign * p->count * p->size; - if (mmio_opp->flags & REPZ) - regs->ecx -= p->count; - } - } else { - if (mmio_opp->flags & OVERLAP) { - unsigned long addr; - - regs->edi += sign * p->count * p->size; - if (mmio_opp->flags & REPZ) - regs->ecx -= p->count; - - addr = regs->edi; - if (sign > 0) - addr -= p->size; - hvm_copy(&p->u.data, addr, p->size, HVM_COPY_OUT); - } else if (p->pdata_valid) { - regs->edi += sign * p->count * p->size; - if (mmio_opp->flags & REPZ) - regs->ecx -= p->count; - } else { - old_eax = regs->eax; - switch (p->size) { - case 1: - regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); - break; - case 2: - regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); - break; - case 4: - regs->eax = (p->u.data & 0xffffffff); - break; - default: - printk("Error: %s unknown port size\n", __FUNCTION__); - domain_crash_synchronous(); - } + } + } + else if ( p->dir == IOREQ_READ ) + { + old_eax = regs->eax; + switch ( p->size ) + { + case 1: + regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); + break; + case 2: + regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); + break; + case 4: + regs->eax = (p->u.data & 0xffffffff); + break; + default: + printk("Error: %s unknown port size\n", __FUNCTION__); + domain_crash_synchronous(); } } } diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/platform.c --- a/xen/arch/x86/hvm/platform.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/platform.c Wed Apr 5 23:59:18 2006 @@ -439,6 +439,14 @@ GET_OP_SIZE_FOR_BYTE(size_reg); return mem_reg(size_reg, opcode, instr, rex); + case 0x87: /* xchg {r/m16|r/m32}, {m/r16|m/r32} */ + instr->instr = INSTR_XCHG; + GET_OP_SIZE_FOR_NONEBYTE(instr->op_size); + if (((*(opcode+1)) & 0xc7) == 5) + return reg_mem(instr->op_size, opcode, instr, rex); + else + return mem_reg(instr->op_size, opcode, instr, rex); + case 0x88: /* mov r8, m8 */ instr->instr = INSTR_MOV; instr->op_size = BYTE; @@ -936,6 +944,17 @@ break; } + case INSTR_XCHG: + mmio_opp->flags = mmio_inst.flags; + mmio_opp->instr = mmio_inst.instr; + mmio_opp->operand[0] = mmio_inst.operand[0]; /* source */ + mmio_opp->operand[1] = mmio_inst.operand[1]; /* destination */ + + /* send the request and wait for the value */ + send_mmio_req(IOREQ_TYPE_XCHG, gpa, 1, + mmio_inst.op_size, 0, 
IOREQ_WRITE, 0); + break; + default: printf("Unhandled MMIO instruction\n"); domain_crash_synchronous(); diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/svm/Makefile --- a/xen/arch/x86/hvm/svm/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/svm/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - subdir-$(x86_32) += x86_32 subdir-$(x86_64) += x86_64 @@ -8,5 +6,3 @@ obj-y += intr.o obj-y += svm.o obj-y += vmcb.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/svm/x86_32/Makefile --- a/xen/arch/x86/hvm/svm/x86_32/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/svm/x86_32/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,1 @@ -include $(BASEDIR)/Rules.mk - obj-y += exits.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/svm/x86_64/Makefile --- a/xen/arch/x86/hvm/svm/x86_64/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/svm/x86_64/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,1 @@ -include $(BASEDIR)/Rules.mk - obj-y += exits.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/vmx/Makefile --- a/xen/arch/x86/hvm/vmx/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/vmx/Makefile Wed Apr 5 23:59:18 2006 @@ -1,10 +1,6 @@ -include $(BASEDIR)/Rules.mk - subdir-$(x86_32) += x86_32 subdir-$(x86_64) += x86_64 obj-y += io.o obj-y += vmcs.o obj-y += vmx.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/vmx/x86_32/Makefile --- a/xen/arch/x86/hvm/vmx/x86_32/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/vmx/x86_32/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,1 @@ -include $(BASEDIR)/Rules.mk - obj-y += exits.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/hvm/vmx/x86_64/Makefile --- a/xen/arch/x86/hvm/vmx/x86_64/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/hvm/vmx/x86_64/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,1 @@ -include $(BASEDIR)/Rules.mk - obj-y += exits.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/io_apic.c --- a/xen/arch/x86/io_apic.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/io_apic.c Wed Apr 5 23:59:18 2006 @@ -1736,8 +1736,10 @@ spin_unlock_irqrestore(&ioapic_lock, flags); /* Sanity check */ - if (reg_00.bits.ID != apic_id) - panic("IOAPIC[%d]: Unable change apic_id!\n", ioapic); + if (reg_00.bits.ID != apic_id) { + printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); + return -1; + } } apic_printk(APIC_VERBOSE, KERN_INFO diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/mpparse.c --- a/xen/arch/x86/mpparse.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/mpparse.c Wed Apr 5 23:59:18 2006 @@ -35,7 +35,7 @@ /* Have we found an MP table */ int smp_found_config; -unsigned int __initdata maxcpus = NR_CPUS; +unsigned int __devinitdata maxcpus = NR_CPUS; #ifdef CONFIG_HOTPLUG_CPU #define CPU_HOTPLUG_ENABLED (1) @@ -226,16 +226,11 @@ num_processors++; if (CPU_HOTPLUG_ENABLED || (num_processors > 8)) { - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_INTEL: - if (!APIC_XAPIC(ver)) { - def_to_bigsmp = 0; - break; - } - /* If P4 and above fall through */ - case X86_VENDOR_AMD: - def_to_bigsmp = 1; - } + /* + * No need for processor or APIC checks: physical delivery + * (bigsmp) mode should always work. 
+ */ + def_to_bigsmp = 1; } bios_cpu_apicid[num_processors - 1] = m->mpc_apicid; } @@ -916,6 +911,7 @@ u32 gsi_base) { int idx = 0; + int tmpid; if (nr_ioapics >= MAX_IO_APICS) { printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded " @@ -936,9 +932,14 @@ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 < 15)) - mp_ioapics[idx].mpc_apicid = io_apic_get_unique_id(idx, id); + tmpid = io_apic_get_unique_id(idx, id); else - mp_ioapics[idx].mpc_apicid = id; + tmpid = id; + if (tmpid == -1) { + nr_ioapics--; + return; + } + mp_ioapics[idx].mpc_apicid = tmpid; mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx); /* diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/nmi.c --- a/xen/arch/x86/nmi.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/nmi.c Wed Apr 5 23:59:18 2006 @@ -431,14 +431,14 @@ */ static void do_nmi_trigger(unsigned char key) { - u32 id = apic_read(APIC_ID); + u32 id = GET_APIC_ID(apic_read(APIC_ID)); printk("Triggering NMI on APIC ID %x\n", id); local_irq_disable(); apic_wait_icr_idle(); apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(id)); - apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_INT_ASSERT); + apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_PHYSICAL); local_irq_enable(); } diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/smp.c --- a/xen/arch/x86/smp.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/smp.c Wed Apr 5 23:59:18 2006 @@ -20,7 +20,7 @@ #include <asm/flushtlb.h> #include <asm/smpboot.h> #include <asm/hardirq.h> -#include <asm/mach_ipi.h> +#include <asm/ipi.h> #include <mach_apic.h> /* @@ -67,7 +67,7 @@ static inline int __prepare_ICR (unsigned int shortcut, int vector) { - return APIC_DM_FIXED | shortcut | vector | APIC_DEST_LOGICAL; + return APIC_DM_FIXED | shortcut | vector; } static inline int __prepare_ICR2 (unsigned int mask) @@ -85,7 +85,7 @@ ASSERT(!cpus_empty(cpumask)); } -void send_IPI_mask_bitmask(cpumask_t cpumask, int vector) +void send_IPI_mask_flat(cpumask_t cpumask, int vector) { unsigned long mask = cpus_addr(cpumask)[0]; unsigned long cfg; @@ -99,18 +99,18 @@ * Wait for idle. */ apic_wait_icr_idle(); - + /* * prepare target chip field */ cfg = __prepare_ICR2(mask); apic_write_around(APIC_ICR2, cfg); - + /* * program the ICR */ - cfg = __prepare_ICR(0, vector); - + cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL; + /* * Send the IPI. The write to APIC_ICR fires this off. */ @@ -119,7 +119,7 @@ local_irq_restore(flags); } -void send_IPI_mask_sequence(cpumask_t mask, int vector) +void send_IPI_mask_phys(cpumask_t mask, int vector) { unsigned long cfg, flags; unsigned int query_cpu; @@ -140,18 +140,18 @@ * Wait for idle. */ apic_wait_icr_idle(); - + /* * prepare target chip field */ - cfg = __prepare_ICR2(cpu_to_logical_apicid(query_cpu)); + cfg = __prepare_ICR2(cpu_physical_id(query_cpu)); apic_write_around(APIC_ICR2, cfg); - + /* * program the ICR */ - cfg = __prepare_ICR(0, vector); - + cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL; + /* * Send the IPI. The write to APIC_ICR fires this off. 
*/ diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/smpboot.c --- a/xen/arch/x86/smpboot.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/smpboot.c Wed Apr 5 23:59:18 2006 @@ -1094,7 +1094,7 @@ if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID)) continue; - if (!check_apicid_present(bit)) + if (!check_apicid_present(apicid)) continue; if (max_cpus <= cpucount+1) continue; diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/time.c --- a/xen/arch/x86/time.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/time.c Wed Apr 5 23:59:18 2006 @@ -670,7 +670,7 @@ (*version)++; } -static inline void __update_dom_time(struct vcpu *v) +static inline void __update_vcpu_system_time(struct vcpu *v) { struct cpu_time *t; struct vcpu_time_info *u; @@ -688,44 +688,14 @@ version_update_end(&u->version); } -void update_dom_time(struct vcpu *v) +void update_vcpu_system_time(struct vcpu *v) { if ( v->domain->shared_info->vcpu_info[v->vcpu_id].time.tsc_timestamp != cpu_time[smp_processor_id()].local_tsc_stamp ) - __update_dom_time(v); -} - -/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. */ -void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base) -{ - u64 x; - u32 y, _wc_sec, _wc_nsec; - struct domain *d; - shared_info_t *s; - - x = (secs * 1000000000ULL) + (u64)nsecs - system_time_base; - y = do_div(x, 1000000000); - - wc_sec = _wc_sec = (u32)x; - wc_nsec = _wc_nsec = (u32)y; - - read_lock(&domlist_lock); - spin_lock(&wc_lock); - - for_each_domain ( d ) - { - s = d->shared_info; - version_update_begin(&s->wc_version); - s->wc_sec = _wc_sec; - s->wc_nsec = _wc_nsec; - version_update_end(&s->wc_version); - } - - spin_unlock(&wc_lock); - read_unlock(&domlist_lock); -} - -void init_domain_time(struct domain *d) + __update_vcpu_system_time(v); +} + +void update_domain_wallclock_time(struct domain *d) { spin_lock(&wc_lock); version_update_begin(&d->shared_info->wc_version); @@ -733,6 +703,27 @@ d->shared_info->wc_nsec = wc_nsec; version_update_end(&d->shared_info->wc_version); spin_unlock(&wc_lock); +} + +/* Set clock to <secs,usecs> after 00:00:00 UTC, 1 January, 1970. 
*/ +void do_settime(unsigned long secs, unsigned long nsecs, u64 system_time_base) +{ + u64 x; + u32 y, _wc_sec, _wc_nsec; + struct domain *d; + + x = (secs * 1000000000ULL) + (u64)nsecs - system_time_base; + y = do_div(x, 1000000000); + + spin_lock(&wc_lock); + wc_sec = _wc_sec = (u32)x; + wc_nsec = _wc_nsec = (u32)y; + spin_unlock(&wc_lock); + + read_lock(&domlist_lock); + for_each_domain ( d ) + update_domain_wallclock_time(d); + read_unlock(&domlist_lock); } static void local_time_calibration(void *unused) @@ -925,7 +916,7 @@ void send_timer_event(struct vcpu *v) { - send_guest_virq(v, VIRQ_TIMER); + send_guest_vcpu_virq(v, VIRQ_TIMER); } /* diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/traps.c --- a/xen/arch/x86/traps.c Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/traps.c Wed Apr 5 23:59:18 2006 @@ -138,13 +138,13 @@ if ( vm86_mode(regs) ) { stack = (unsigned long *)((regs->ss << 4) + (regs->esp & 0xffff)); - printk("Guest stack trace from ss:sp = %04x:%04x (VM86)\n ", + printk("Guest stack trace from ss:sp = %04x:%04x (VM86)\n ", regs->ss, (uint16_t)(regs->esp & 0xffff)); } else { stack = (unsigned long *)regs->esp; - printk("Guest stack trace from "__OP"sp=%p:\n ", stack); + printk("Guest stack trace from "__OP"sp=%p:\n ", stack); } for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) @@ -160,8 +160,8 @@ break; } if ( (i != 0) && ((i % stack_words_per_line) == 0) ) - printk("\n "); - printk("%p ", _p(addr)); + printk("\n "); + printk(" %p", _p(addr)); stack++; } if ( i == 0 ) @@ -257,16 +257,16 @@ if ( guest_mode(regs) ) return show_guest_stack(regs); - printk("Xen stack trace from "__OP"sp=%p:\n ", stack); + printk("Xen stack trace from "__OP"sp=%p:\n ", stack); for ( i = 0; i < (debug_stack_lines*stack_words_per_line); i++ ) { if ( ((long)stack & (STACK_SIZE-BYTES_PER_LONG)) == 0 ) break; if ( (i != 0) && ((i % stack_words_per_line) == 0) ) - printk("\n "); + printk("\n "); addr = *stack++; - printk("%p ", _p(addr)); + printk(" %p", _p(addr)); } if ( i == 0 ) printk("Stack empty."); diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/x86_32/Makefile --- a/xen/arch/x86/x86_32/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/x86_32/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - obj-y += domain_page.o obj-y += entry.o obj-y += mm.o @@ -7,5 +5,3 @@ obj-y += traps.o obj-$(supervisor_mode_kernel) += supervisor_mode_kernel.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/x86_32/entry.S --- a/xen/arch/x86/x86_32/entry.S Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/x86_32/entry.S Wed Apr 5 23:59:18 2006 @@ -561,7 +561,7 @@ testl $APIC_ICR_BUSY,%ebx jnz 1b # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi) - movl $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_LOGICAL | \ + movl $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_PHYSICAL | \ TRAP_deferred_nmi),%ss:APIC_ICR(%eax) jmp restore_all_xen #endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/x86_64/Makefile --- a/xen/arch/x86/x86_64/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/x86_64/Makefile Wed Apr 5 23:59:18 2006 @@ -1,7 +1,3 @@ -include $(BASEDIR)/Rules.mk - obj-y += entry.o obj-y += mm.o obj-y += traps.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/common/Makefile --- a/xen/common/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/common/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,3 @@ -include $(BASEDIR)/Rules.mk - obj-y += acm_ops.o obj-y += bitmap.o obj-y += dom0_ops.o @@ -28,7 
+26,5 @@ obj-$(perfc) += perfc.o obj-$(crash_debug) += gdbstub.o -include $(BASEDIR)/Post.mk - # Object file contains changeset and compiler information. kernel.o: $(BASEDIR)/include/xen/compile.h diff -r 09967f2d6e3b -r ece9b5710b29 xen/common/domain.c --- a/xen/common/domain.c Wed Apr 5 23:59:06 2006 +++ b/xen/common/domain.c Wed Apr 5 23:59:18 2006 @@ -137,7 +137,7 @@ domain_relinquish_resources(d); put_domain(d); - send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC); + send_guest_global_virq(dom0, VIRQ_DOM_EXC); } } @@ -192,7 +192,7 @@ /* Don't set DOMF_shutdown until execution contexts are sync'ed. */ if ( !test_and_set_bit(_DOMF_shutdown, &d->domain_flags) ) - send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC); + send_guest_global_virq(dom0, VIRQ_DOM_EXC); UNLOCK_BIGLOCK(d); @@ -267,7 +267,7 @@ for_each_vcpu ( d, v ) vcpu_sleep_nosync(v); - send_guest_virq(dom0->vcpu[0], VIRQ_DEBUGGER); + send_guest_global_virq(dom0, VIRQ_DEBUGGER); } @@ -307,7 +307,7 @@ free_domain(d); - send_guest_virq(dom0->vcpu[0], VIRQ_DOM_EXC); + send_guest_global_virq(dom0, VIRQ_DOM_EXC); } void vcpu_pause(struct vcpu *v) diff -r 09967f2d6e3b -r ece9b5710b29 xen/common/event_channel.c --- a/xen/common/event_channel.c Wed Apr 5 23:59:06 2006 +++ b/xen/common/event_channel.c Wed Apr 5 23:59:18 2006 @@ -3,7 +3,7 @@ * * Event notifications from VIRQs, PIRQs, and other domains. * - * Copyright (c) 2003-2005, K A Fraser. + * Copyright (c) 2003-2006, K A Fraser. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -46,6 +46,28 @@ goto out; \ } while ( 0 ) + +static int virq_is_global(int virq) +{ + int rc; + + ASSERT((virq >= 0) && (virq < NR_VIRQS)); + + switch ( virq ) + { + case VIRQ_TIMER: + case VIRQ_DEBUG: + rc = 0; + break; + default: + rc = 1; + break; + } + + return rc; +} + + static int get_free_port(struct domain *d) { struct evtchn *chn; @@ -179,6 +201,9 @@ long rc = 0; if ( virq >= ARRAY_SIZE(v->virq_to_evtchn) ) + return -EINVAL; + + if ( virq_is_global(virq) && (vcpu != 0) ) return -EINVAL; if ( (vcpu >= ARRAY_SIZE(d->vcpu)) || ((v = d->vcpu[vcpu]) == NULL) ) @@ -360,7 +385,7 @@ rc = -EINVAL; goto out; } - + port2 = chn1->u.interdomain.remote_port; BUG_ON(!port_is_valid(d2, port2)); @@ -437,6 +462,7 @@ return ret; } + void evtchn_set_pending(struct vcpu *v, int port) { @@ -471,20 +497,47 @@ } } -void send_guest_virq(struct vcpu *v, int virq) -{ - int port = v->virq_to_evtchn[virq]; - - if ( likely(port != 0) ) - evtchn_set_pending(v, port); -} + +void send_guest_vcpu_virq(struct vcpu *v, int virq) +{ + int port; + + ASSERT(!virq_is_global(virq)); + + port = v->virq_to_evtchn[virq]; + if ( unlikely(port == 0) ) + return; + + evtchn_set_pending(v, port); +} + +void send_guest_global_virq(struct domain *d, int virq) +{ + int port; + struct evtchn *chn; + + ASSERT(virq_is_global(virq)); + + port = d->vcpu[0]->virq_to_evtchn[virq]; + if ( unlikely(port == 0) ) + return; + + chn = evtchn_from_port(d, port); + evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port); +} + void send_guest_pirq(struct domain *d, int pirq) { int port = d->pirq_to_evtchn[pirq]; - struct evtchn *chn = evtchn_from_port(d, port); + struct evtchn *chn; + + ASSERT(port != 0); + + chn = evtchn_from_port(d, port); evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port); } + static long evtchn_status(evtchn_status_t *status) { @@ -550,6 +603,7 @@ return rc; } + long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) { struct domain *d = current->domain; @@ -570,6 
+624,12 @@ chn = evtchn_from_port(d, port); switch ( chn->state ) { + case ECS_VIRQ: + if ( virq_is_global(chn->u.virq) ) + chn->notify_vcpu_id = vcpu_id; + else + rc = -EINVAL; + break; case ECS_UNBOUND: case ECS_INTERDOMAIN: case ECS_PIRQ: @@ -584,6 +644,7 @@ spin_unlock(&d->evtchn_lock); return rc; } + static long evtchn_unmask(evtchn_unmask_t *unmask) { @@ -620,6 +681,7 @@ return 0; } + long do_event_channel_op(GUEST_HANDLE(evtchn_op_t) uop) { long rc; @@ -691,6 +753,13 @@ } return rc; +} + + +void evtchn_notify_reserved_port(struct domain *d, int port) +{ + struct evtchn *chn = evtchn_from_port(d, port); + evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port); } diff -r 09967f2d6e3b -r ece9b5710b29 xen/common/grant_table.c --- a/xen/common/grant_table.c Wed Apr 5 23:59:06 2006 +++ b/xen/common/grant_table.c Wed Apr 5 23:59:18 2006 @@ -618,8 +618,10 @@ return -EFAULT; } + mfn = gmfn_to_mfn(d, gop.mfn); + /* Check the passed page frame for basic validity. */ - if ( unlikely(!mfn_valid(gop.mfn)) ) + if ( unlikely(!mfn_valid(mfn)) ) { DPRINTK("gnttab_transfer: out-of-range %lx\n", (unsigned long)gop.mfn); @@ -627,7 +629,6 @@ goto copyback; } - mfn = gmfn_to_mfn(d, gop.mfn); page = mfn_to_page(mfn); if ( unlikely(IS_XEN_HEAP_FRAME(page)) ) { diff -r 09967f2d6e3b -r ece9b5710b29 xen/common/keyhandler.c --- a/xen/common/keyhandler.c Wed Apr 5 23:59:06 2006 +++ b/xen/common/keyhandler.c Wed Apr 5 23:59:18 2006 @@ -162,7 +162,7 @@ &d->shared_info->evtchn_mask[0]), test_bit(v->virq_to_evtchn[VIRQ_DEBUG]/BITS_PER_LONG, &v->vcpu_info->evtchn_pending_sel)); - send_guest_virq(v, VIRQ_DEBUG); + send_guest_vcpu_virq(v, VIRQ_DEBUG); } } diff -r 09967f2d6e3b -r ece9b5710b29 xen/common/page_alloc.c --- a/xen/common/page_alloc.c Wed Apr 5 23:59:06 2006 +++ b/xen/common/page_alloc.c Wed Apr 5 23:59:18 2006 @@ -170,7 +170,7 @@ void init_boot_pages(paddr_t ps, paddr_t pe) { - unsigned long bad_pfn; + unsigned long bad_spfn, bad_epfn, i; char *p; ps = round_pgup(ps); @@ -184,18 +184,31 @@ p = opt_badpage; while ( *p != '\0' ) { - bad_pfn = simple_strtoul(p, &p, 0); + bad_spfn = simple_strtoul(p, &p, 0); + bad_epfn = bad_spfn; + + if ( *p == '-' ) + { + p++; + bad_epfn = simple_strtoul(p, &p, 0); + if ( bad_epfn < bad_spfn ) + bad_epfn = bad_spfn; + } if ( *p == ',' ) p++; else if ( *p != '\0' ) break; - if ( (bad_pfn < max_page) && !allocated_in_map(bad_pfn) ) - { - printk("Marking page %lx as bad\n", bad_pfn); - map_alloc(bad_pfn, 1); - } + if ( bad_epfn == bad_spfn ) + printk("Marking page %lx as bad\n", bad_spfn); + else + printk("Marking pages %lx through %lx as bad\n", + bad_spfn, bad_epfn); + + for ( i = bad_spfn; i <= bad_epfn; i++ ) + if ( (i < max_page) && !allocated_in_map(i) ) + map_alloc(i, 1); } } diff -r 09967f2d6e3b -r ece9b5710b29 xen/common/schedule.c --- a/xen/common/schedule.c Wed Apr 5 23:59:06 2006 +++ b/xen/common/schedule.c Wed Apr 5 23:59:18 2006 @@ -572,7 +572,7 @@ /* Ensure that the domain has an up-to-date time base. 
*/ if ( !is_idle_vcpu(next) ) { - update_dom_time(next); + update_vcpu_system_time(next); if ( next->sleep_tick != schedule_data[cpu].tick ) send_timer_event(next); } @@ -609,7 +609,7 @@ if ( !is_idle_vcpu(v) ) { - update_dom_time(v); + update_vcpu_system_time(v); send_timer_event(v); } @@ -623,7 +623,7 @@ { struct vcpu *v = data; - update_dom_time(v); + update_vcpu_system_time(v); send_timer_event(v); } diff -r 09967f2d6e3b -r ece9b5710b29 xen/drivers/Makefile --- a/xen/drivers/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/drivers/Makefile Wed Apr 5 23:59:18 2006 @@ -1,6 +1,2 @@ -include $(BASEDIR)/Rules.mk - subdir-y += char subdir-$(HAS_ACPI) += acpi - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/drivers/acpi/Makefile --- a/xen/drivers/acpi/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/drivers/acpi/Makefile Wed Apr 5 23:59:18 2006 @@ -1,5 +1,1 @@ -include $(BASEDIR)/Rules.mk - obj-y += tables.o - -include $(BASEDIR)/Post.mk diff -r 09967f2d6e3b -r ece9b5710b29 xen/drivers/char/Makefile --- a/xen/drivers/char/Makefile Wed Apr 5 23:59:06 2006 +++ b/xen/drivers/char/Makefile Wed Apr 5 23:59:18 2006 @@ -1,10 +1,6 @@ -include $(BASEDIR)/Rules.mk - obj-y += console.o obj-y += ns16550.o obj-y += serial.o -include $(BASEDIR)/Post.mk - # Object file contains changeset and compiler information. console.o: $(BASEDIR)/include/xen/compile.h diff -r 09967f2d6e3b -r ece9b5710b29 xen/drivers/char/console.c --- a/xen/drivers/char/console.c Wed Apr 5 23:59:06 2006 +++ b/xen/drivers/char/console.c Wed Apr 5 23:59:18 2006 @@ -200,10 +200,11 @@ } else { + if ( xpos >= COLUMNS ) + put_newline(); video[(xpos + ypos * COLUMNS) * 2] = c & 0xFF; video[(xpos + ypos * COLUMNS) * 2 + 1] = ATTRIBUTE; - if ( ++xpos >= COLUMNS ) - put_newline(); + ++xpos; } } @@ -293,7 +294,7 @@ if ( (serial_rx_prod-serial_rx_cons) != SERIAL_RX_SIZE ) serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod++)] = c; /* Always notify the guest: prevents receive path from getting stuck. 
*/ - send_guest_virq(dom0->vcpu[0], VIRQ_CONSOLE); + send_guest_global_virq(dom0, VIRQ_CONSOLE); } static void serial_rx(char c, struct cpu_user_regs *regs) diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/apicdef.h --- a/xen/include/asm-x86/apicdef.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/asm-x86/apicdef.h Wed Apr 5 23:59:18 2006 @@ -62,6 +62,7 @@ #define APIC_INT_ASSERT 0x04000 #define APIC_ICR_BUSY 0x01000 #define APIC_DEST_LOGICAL 0x00800 +#define APIC_DEST_PHYSICAL 0x00000 #define APIC_DM_FIXED 0x00000 #define APIC_DM_LOWEST 0x00100 #define APIC_DM_SMI 0x00200 diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/genapic.h --- a/xen/include/asm-x86/genapic.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/asm-x86/genapic.h Wed Apr 5 23:59:18 2006 @@ -21,27 +21,6 @@ char *name; int (*probe)(void); - int (*apic_id_registered)(void); - cpumask_t (*target_cpus)(void); - int int_delivery_mode; - int int_dest_mode; - int ESR_DISABLE; - int apic_destination_logical; - unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid); - unsigned long (*check_apicid_present)(int apicid); - int no_balance_irq; - void (*init_apic_ldr)(void); - physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map); - - void (*clustered_apic_check)(void); - int (*apicid_to_node)(int logical_apicid); - int (*cpu_to_logical_apicid)(int cpu); - int (*cpu_present_to_apicid)(int mps_cpu); - physid_mask_t (*apicid_to_cpu_present)(int phys_apicid); - int (*check_phys_apicid_present)(int boot_cpu_physical_apicid); - void (*enable_apic_mode)(void); - u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb); - /* When one of the next two hooks returns 1 the genapic is switched to this. Essentially they are additional probe functions. */ @@ -49,10 +28,14 @@ char *productid); int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); + /* Interrupt delivery parameters ('physical' vs. 'logical flat'). 
*/ + int int_delivery_mode; + int int_dest_mode; + void (*init_apic_ldr)(void); + void (*clustered_apic_check)(void); + cpumask_t (*target_cpus)(void); unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); - - /* ipi */ - void (*send_ipi_mask)(cpumask_t mask, int vector); + void (*send_IPI_mask)(cpumask_t mask, int vector); }; #define APICFUNC(x) .x = x @@ -60,29 +43,37 @@ #define APIC_INIT(aname, aprobe) \ .name = aname, \ .probe = aprobe, \ - .int_delivery_mode = INT_DELIVERY_MODE, \ - .int_dest_mode = INT_DEST_MODE, \ - .no_balance_irq = NO_BALANCE_IRQ, \ - .ESR_DISABLE = esr_disable, \ - .apic_destination_logical = APIC_DEST_LOGICAL, \ - APICFUNC(apic_id_registered), \ - APICFUNC(target_cpus), \ - APICFUNC(check_apicid_used), \ - APICFUNC(check_apicid_present), \ - APICFUNC(init_apic_ldr), \ - APICFUNC(ioapic_phys_id_map), \ - APICFUNC(clustered_apic_check), \ - APICFUNC(apicid_to_node), \ - APICFUNC(cpu_to_logical_apicid), \ - APICFUNC(cpu_present_to_apicid), \ - APICFUNC(apicid_to_cpu_present), \ - APICFUNC(check_phys_apicid_present), \ APICFUNC(mps_oem_check), \ - APICFUNC(cpu_mask_to_apicid), \ - APICFUNC(acpi_madt_oem_check), \ - APICFUNC(enable_apic_mode), \ - APICFUNC(phys_pkg_id) + APICFUNC(acpi_madt_oem_check) extern struct genapic *genapic; +void init_apic_ldr_flat(void); +void clustered_apic_check_flat(void); +cpumask_t target_cpus_flat(void); +unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask); +void send_IPI_mask_flat(cpumask_t mask, int vector); +#define GENAPIC_FLAT \ + .int_delivery_mode = dest_LowestPrio, \ + .int_dest_mode = 1 /* logical delivery */, \ + .init_apic_ldr = init_apic_ldr_flat, \ + .clustered_apic_check = clustered_apic_check_flat, \ + .target_cpus = target_cpus_flat, \ + .cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \ + .send_IPI_mask = send_IPI_mask_flat + +void init_apic_ldr_phys(void); +void clustered_apic_check_phys(void); +cpumask_t target_cpus_phys(void); +unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask); +void send_IPI_mask_phys(cpumask_t mask, int vector); +#define GENAPIC_PHYS \ + .int_delivery_mode = dest_Fixed, \ + .int_dest_mode = 0 /* physical delivery */, \ + .init_apic_ldr = init_apic_ldr_phys, \ + .clustered_apic_check = clustered_apic_check_phys, \ + .target_cpus = target_cpus_phys, \ + .cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \ + .send_IPI_mask = send_IPI_mask_phys + #endif diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/hvm/io.h --- a/xen/include/asm-x86/hvm/io.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/asm-x86/hvm/io.h Wed Apr 5 23:59:18 2006 @@ -66,6 +66,7 @@ #define INSTR_STOS 10 #define INSTR_TEST 11 #define INSTR_BT 12 +#define INSTR_XCHG 13 struct instruction { __s8 instr; /* instruction type */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/mach-generic/mach_apic.h --- a/xen/include/asm-x86/mach-generic/mach_apic.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/asm-x86/mach-generic/mach_apic.h Wed Apr 5 23:59:18 2006 @@ -2,28 +2,40 @@ #define __ASM_MACH_APIC_H #include <asm/genapic.h> +#include <asm/smp.h> -#define esr_disable (genapic->ESR_DISABLE) -#define NO_BALANCE_IRQ (genapic->no_balance_irq) +/* ESR was originally disabled in Linux for NUMA-Q. Do we really need to? */ +#define esr_disable (0) + +/* The following are dependent on APIC delivery mode (logical vs. physical). 
*/ #define INT_DELIVERY_MODE (genapic->int_delivery_mode) #define INT_DEST_MODE (genapic->int_dest_mode) -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL (genapic->apic_destination_logical) #define TARGET_CPUS (genapic->target_cpus()) -#define apic_id_registered (genapic->apic_id_registered) #define init_apic_ldr (genapic->init_apic_ldr) -#define ioapic_phys_id_map (genapic->ioapic_phys_id_map) #define clustered_apic_check (genapic->clustered_apic_check) -#define apicid_to_node (genapic->apicid_to_node) -#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) -#define cpu_present_to_apicid (genapic->cpu_present_to_apicid) -#define apicid_to_cpu_present (genapic->apicid_to_cpu_present) -#define check_apicid_present (genapic->check_apicid_present) -#define check_phys_apicid_present (genapic->check_phys_apicid_present) -#define check_apicid_used (genapic->check_apicid_used) #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) -#define enable_apic_mode (genapic->enable_apic_mode) -#define phys_pkg_id (genapic->phys_pkg_id) + +extern void es7000_sw_apic(void); +static inline void enable_apic_mode(void) +{ + es7000_sw_apic(); + return; +} + +/* No sane NUMA support right now. We should parse ACPI SRAT. */ +static inline int apicid_to_node(int logical_apicid) +{ + return 0; +} + +extern u8 bios_cpu_apicid[]; +static inline int cpu_present_to_apicid(int mps_cpu) +{ + if (mps_cpu < NR_CPUS) + return (int)bios_cpu_apicid[mps_cpu]; + else + return BAD_APICID; +} static inline int mpc_apic_id(struct mpc_config_processor *m, struct mpc_config_translation *translation_record) @@ -47,4 +59,41 @@ extern void generic_bigsmp_probe(void); +/* + * The following functions based around phys_cpu_present_map are disabled in + * some i386 Linux subarchitectures, and in x86_64 'cluster' genapic mode. I'm + * really not sure why, since all local APICs should have distinct physical + * IDs, and we need to know what they are. 
+ */ +static inline int apic_id_registered(void) +{ + return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), + phys_cpu_present_map); +} + +static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) +{ + return phys_map; +} + +static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) +{ + return physid_isset(apicid, bitmap); +} + +static inline unsigned long check_apicid_present(int apicid) +{ + return physid_isset(apicid, phys_cpu_present_map); +} + +static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) +{ + return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); +} + +static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) +{ + return physid_mask_of_physid(phys_apicid); +} + #endif /* __ASM_MACH_APIC_H */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/mach-summit/mach_mpparse.h --- a/xen/include/asm-x86/mach-summit/mach_mpparse.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/asm-x86/mach-summit/mach_mpparse.h Wed Apr 5 23:59:18 2006 @@ -1,7 +1,5 @@ #ifndef __ASM_MACH_MPPARSE_H #define __ASM_MACH_MPPARSE_H - -#include <mach_apic.h> extern int use_cyclone; diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/time.h --- a/xen/include/asm-x86/time.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/asm-x86/time.h Wed Apr 5 23:59:18 2006 @@ -6,9 +6,6 @@ extern void calibrate_tsc_bp(void); extern void calibrate_tsc_ap(void); - -struct domain; -extern void init_domain_time(struct domain *d); typedef u64 cycles_t; diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/public/event_channel.h --- a/xen/include/public/event_channel.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/public/event_channel.h Wed Apr 5 23:59:18 2006 @@ -50,9 +50,13 @@ * EVTCHNOP_bind_virq: Bind a local event channel to VIRQ <irq> on specified * vcpu. * NOTES: - * 1. A virtual IRQ may be bound to at most one event channel per vcpu. - * 2. The allocated event channel is bound to the specified vcpu. The binding - * may not be changed. + * 1. Virtual IRQs are classified as per-vcpu or global. See the VIRQ list + * in xen.h for the classification of each VIRQ. + * 2. Global VIRQs must be allocated on VCPU0 but can subsequently be + * re-bound via EVTCHNOP_bind_vcpu. + * 3. Per-vcpu VIRQs may be bound to at most one event channel per vcpu. + * The allocated event channel is bound to the specified vcpu and the + * binding cannot be changed. */ #define EVTCHNOP_bind_virq 1 typedef struct evtchn_bind_virq { @@ -152,9 +156,11 @@ * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an * event is pending. * NOTES: - * 1. IPI- and VIRQ-bound channels always notify the vcpu that initialised - * the binding. This binding cannot be changed. - * 2. All other channels notify vcpu0 by default. This default is set when + * 1. IPI-bound channels always notify the vcpu specified at bind time. + * This binding cannot be changed. + * 2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time. + * This binding cannot be changed. + * 3. All other channels notify vcpu0 by default. This default is set when * the channel is allocated (a port that is freed and subsequently reused * has its binding reset to vcpu0). 
*/ diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/public/hvm/ioreq.h --- a/xen/include/public/hvm/ioreq.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/public/hvm/ioreq.h Wed Apr 5 23:59:18 2006 @@ -34,6 +34,7 @@ #define IOREQ_TYPE_AND 2 #define IOREQ_TYPE_OR 3 #define IOREQ_TYPE_XOR 4 +#define IOREQ_TYPE_XCHG 5 /* * VMExit dispatcher should cooperate with instruction decoder to diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/public/xen.h --- a/xen/include/public/xen.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/public/xen.h Wed Apr 5 23:59:18 2006 @@ -65,12 +65,17 @@ * VIRTUAL INTERRUPTS * * Virtual interrupts that a guest OS may receive from Xen. - */ -#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */ -#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */ -#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */ -#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */ -#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */ + * + * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a + * global VIRQ. The former can be bound once per VCPU and cannot be re-bound. + * The latter can be allocated only once per guest: they must initially be + * allocated to VCPU0 but can subsequently be re-bound. + */ +#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */ +#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */ +#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */ +#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */ +#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */ #define NR_VIRQS 8 /* diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/xen/event.h --- a/xen/include/xen/event.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/xen/event.h Wed Apr 5 23:59:18 2006 @@ -3,7 +3,7 @@ * * A nice interface for passing asynchronous events to guest OSes. * - * Copyright (c) 2002-2005, K A Fraser + * Copyright (c) 2002-2006, K A Fraser */ #ifndef __XEN_EVENT_H__ @@ -18,11 +18,18 @@ extern void evtchn_set_pending(struct vcpu *v, int port); /* - * send_guest_virq: + * send_guest_vcpu_virq: Notify guest via a per-VCPU VIRQ. * @v: VCPU to which virtual IRQ should be sent * @virq: Virtual IRQ number (VIRQ_*) */ -extern void send_guest_virq(struct vcpu *v, int virq); +extern void send_guest_vcpu_virq(struct vcpu *v, int virq); + +/* + * send_guest_global_virq: Notify guest via a global VIRQ. 
+ * @d: Domain to which virtual IRQ should be sent + * @virq: Virtual IRQ number (VIRQ_*) + */ +extern void send_guest_global_virq(struct domain *d, int virq); /* * send_guest_pirq: diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/xen/time.h --- a/xen/include/xen/time.h Wed Apr 5 23:59:06 2006 +++ b/xen/include/xen/time.h Wed Apr 5 23:59:18 2006 @@ -55,7 +55,9 @@ #define MILLISECS(_ms) ((s_time_t)((_ms) * 1000000ULL)) #define MICROSECS(_us) ((s_time_t)((_us) * 1000ULL)) -extern void update_dom_time(struct vcpu *v); +extern void update_vcpu_system_time(struct vcpu *v); +extern void update_domain_wallclock_time(struct domain *d); + extern void do_settime( unsigned long secs, unsigned long nsecs, u64 system_time_base); diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/genapic/delivery.c --- /dev/null Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/genapic/delivery.c Wed Apr 5 23:59:18 2006 @@ -0,0 +1,68 @@ +#include <xen/config.h> +#include <xen/irq.h> +#include <xen/sched.h> +#include <asm/current.h> +#include <asm/smp.h> +#include <asm/hardirq.h> +#include <mach_apic.h> + + +/* + * LOGICAL FLAT DELIVERY MODE (multicast via bitmask to <= 8 logical APIC IDs). + */ + +void init_apic_ldr_flat(void) +{ + unsigned long val; + + apic_write_around(APIC_DFR, APIC_DFR_FLAT); + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); + apic_write_around(APIC_LDR, val); +} + +void clustered_apic_check_flat(void) +{ + printk("Enabling APIC mode: Flat. Using %d I/O APICs\n", nr_ioapics); +} + +cpumask_t target_cpus_flat(void) +{ + return cpu_online_map; +} + +unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask) +{ + return cpus_addr(cpumask)[0]; +} + + +/* + * PHYSICAL DELIVERY MODE (unicast to physical APIC IDs). + */ + +void init_apic_ldr_phys(void) +{ + unsigned long val; + apic_write_around(APIC_DFR, APIC_DFR_FLAT); + /* A dummy logical ID should be fine. We only deliver in phys mode. */ + val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; + apic_write_around(APIC_LDR, val); +} + +void clustered_apic_check_phys(void) +{ + printk("Enabling APIC mode: Phys. Using %d I/O APICs\n", nr_ioapics); +} + +cpumask_t target_cpus_phys(void) +{ + /* IRQs will get bound more accurately later. */ + return cpumask_of_cpu(0); +} + +unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask) +{ + /* As we are using single CPU as destination, pick only one CPU here */ + return cpu_physical_id(first_cpu(cpumask)); +} diff -r 09967f2d6e3b -r ece9b5710b29 xen/arch/x86/shutdown.c --- /dev/null Wed Apr 5 23:59:06 2006 +++ b/xen/arch/x86/shutdown.c Wed Apr 5 23:59:18 2006 @@ -0,0 +1,342 @@ +/****************************************************************************** + * arch/x86/shutdown.c + * + * x86-specific shutdown handling. + */ + +#include <xen/config.h> +#include <xen/init.h> +#include <xen/lib.h> +#include <xen/sched.h> +#include <xen/smp.h> +#include <xen/delay.h> +#include <xen/dmi.h> +#include <asm/regs.h> +#include <asm/mc146818rtc.h> +#include <asm/system.h> +#include <asm/io.h> +#include <asm/processor.h> +#include <asm/mpspec.h> +#include <xen/irq.h> +#include <xen/console.h> +#include <asm/msr.h> + +/* opt_noreboot: If true, machine will need manual reset on error. */ +static int opt_noreboot = 0; +boolean_param("noreboot", opt_noreboot); + +/* reboot_str: comma-separated list of reboot options. 
*/ +static char __initdata reboot_str[10] = ""; +string_param("reboot", reboot_str); + +static long no_idt[2]; +static int reboot_mode; + +static inline void kb_wait(void) +{ + int i; + + for ( i = 0; i < 0x10000; i++ ) + if ( (inb_p(0x64) & 0x02) == 0 ) + break; +} + +void __attribute__((noreturn)) __machine_halt(void *unused) +{ + for ( ; ; ) + safe_halt(); +} + +void machine_halt(void) +{ + watchdog_disable(); + console_start_sync(); + smp_call_function(__machine_halt, NULL, 1, 0); + __machine_halt(NULL); +} + +#ifdef __i386__ + +static int reboot_thru_bios; + +/* The following code and data reboots the machine by switching to real + mode and jumping to the BIOS reset entry point, as if the CPU has + really been reset. The previous version asked the keyboard + controller to pulse the CPU reset line, which is more thorough, but + doesn't work with at least one type of 486 motherboard. It is easy + to stop this code working; hence the copious comments. */ + +static unsigned long long +real_mode_gdt_entries [3] = +{ + 0x0000000000000000ULL, /* Null descriptor */ + 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ + 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ +}; + +static const struct +{ + unsigned short size __attribute__ ((packed)); + unsigned long long * base __attribute__ ((packed)); +} +real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries }, +real_mode_idt = { 0x3ff, NULL }; + + +/* This is 16-bit protected mode code to disable paging and the cache, + switch to real mode and jump to the BIOS reset code. + + The instruction that switches to real mode by writing to CR0 must be + followed immediately by a far jump instruction, which set CS to a + valid value for real mode, and flushes the prefetch queue to avoid + running instructions that have already been decoded in protected + mode. + + Clears all the flags except ET, especially PG (paging), PE + (protected-mode enable) and TS (task switch for coprocessor state + save). Flushes the TLB after paging has been disabled. Sets CD and + NW, to disable the cache on a 486, and invalidates the cache. This + is more like the state of a 486 after reset. I don't know if + something else should be done for other chips. + + More could be done here to set up the registers as if a CPU reset had + occurred; hopefully real BIOSs don't assume much. */ + +static const unsigned char real_mode_switch [] = +{ + 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */ + 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */ + 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */ + 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */ + 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */ + 0x0f, 0x20, 0xc2, /* movl %cr0,%edx */ + 0x66, 0x81, 0xe2, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%edx */ + 0x74, 0x02, /* jz f */ + 0x0f, 0x09, /* wbinvd */ + 0x24, 0x10, /* f: andb $0x10,al */ + 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ +}; +#define MAX_LENGTH 0x40 +static const unsigned char jump_to_bios [] = +{ + 0xea, 0xf0, 0xff, 0x00, 0xf0 /* ljmp $0xf000,$0xfff0 */ +}; + +/* + * Switch to real mode and then execute the code + * specified by the code and length parameters. + * We assume that length will aways be less that MAX_LENGTH! + */ +void machine_real_restart(const unsigned char *code, unsigned length) +{ + local_irq_disable(); + + /* Write zero to CMOS register number 0x0f, which the BIOS POST + routine will recognize as telling it to do a proper reboot. 
(Well + that's what this book in front of me says -- it may only apply to + the Phoenix BIOS though, it's not clear). At the same time, + disable NMIs by setting the top bit in the CMOS address register, + as we're about to do peculiar things to the CPU. */ + + spin_lock(&rtc_lock); + CMOS_WRITE(0x00, 0x8f); + spin_unlock(&rtc_lock); + + /* Identity-map virtual address zero. */ + + map_pages_to_xen(0, 0, 1, __PAGE_HYPERVISOR|MAP_SMALL_PAGES); + set_current(idle_vcpu[0]); + write_ptbase(idle_vcpu[0]); + + /* For the switch to real mode, copy some code to low memory. It has + to be in the first 64k because it is running in 16-bit mode, and it + has to have the same physical and virtual address, because it turns + off paging. Copy it near the end of the first page, out of the way + of BIOS variables. */ + + memcpy((void *)(PAGE_SIZE - sizeof(real_mode_switch) - MAX_LENGTH), + real_mode_switch, sizeof(real_mode_switch)); + memcpy((void *)(PAGE_SIZE - MAX_LENGTH), code, length); + + /* Set up the IDT for real mode. */ + + __asm__ __volatile__("lidt %0": : "m" (real_mode_idt)); + + /* Set up a GDT from which we can load segment descriptors for real + mode. The GDT is not used in real mode; it is just needed here to + prepare the descriptors. */ + + __asm__ __volatile__("lgdt %0": : "m" (real_mode_gdt)); + + /* Load the data segment registers, and thus the descriptors ready for + real mode. The base address of each segment is 0x100, 16 times the + selector value being loaded here. This is so that the segment + registers don't have to be reloaded after switching to real mode: + the values are consistent for real mode operation already. */ + + __asm__ __volatile__ ("\tmov %0,%%ds\n" + "\tmov %0,%%es\n" + "\tmov %0,%%fs\n" + "\tmov %0,%%gs\n" + "\tmov %0,%%ss" + : + : "r" (0x0010)); + + /* Jump to the 16-bit code that we copied earlier. It disables paging + and the cache, switches to real mode, and jumps to the BIOS reset + entry point. */ + + __asm__ __volatile__ ("ljmp $0x0008,%0" + : + : "i" ((void *)(PAGE_SIZE - + sizeof(real_mode_switch) - + MAX_LENGTH))); +} + +#else /* __x86_64__ */ + +#define machine_real_restart(x, y) +#define reboot_thru_bios 0 + +#endif + +void machine_restart(char * __unused) +{ + int i; + + if ( opt_noreboot ) + { + printk("Reboot disabled on cmdline: require manual reset\n"); + machine_halt(); + } + + watchdog_disable(); + console_start_sync(); + + local_irq_enable(); + + /* Ensure we are the boot CPU. */ + if ( GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid ) + { + smp_call_function((void *)machine_restart, NULL, 1, 0); + for ( ; ; ) + safe_halt(); + } + + /* + * Stop all CPUs and turn off local APICs and the IO-APIC, so + * other OSs see a clean IRQ state. + */ + smp_send_stop(); + disable_IO_APIC(); + hvm_disable(); + + /* Rebooting needs to touch the page at absolute address 0. */ + *((unsigned short *)__va(0x472)) = reboot_mode; + + if (reboot_thru_bios <= 0) + { + for ( ; ; ) + { + /* Pulse the keyboard reset line. */ + for ( i = 0; i < 100; i++ ) + { + kb_wait(); + udelay(50); + outb(0xfe,0x64); /* pulse reset low */ + udelay(50); + } + + /* That didn't work - force a triple fault.. */ + __asm__ __volatile__("lidt %0": "=m" (no_idt)); + __asm__ __volatile__("int3"); + } + } + machine_real_restart(jump_to_bios, sizeof(jump_to_bios)); +} + +#ifndef reboot_thru_bios +static int __init set_bios_reboot(struct dmi_system_id *d) +{ + if ( !reboot_thru_bios ) + { + reboot_thru_bios = 1; + printk("%s series board detected. 
" + "Selecting BIOS-method for reboots.\n", d->ident); + } + return 0; +} + +static struct dmi_system_id __initdata reboot_dmi_table[] = { + { /* Handle problems with rebooting on Dell 1300's */ + .callback = set_bios_reboot, + .ident = "Dell PowerEdge 1300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"), + }, + }, + { /* Handle problems with rebooting on Dell 300's */ + .callback = set_bios_reboot, + .ident = "Dell PowerEdge 300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"), + }, + }, + { /* Handle problems with rebooting on Dell 2400's */ + .callback = set_bios_reboot, + .ident = "Dell PowerEdge 2400", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"), + }, + }, + { /* Handle problems with rebooting on HP laptops */ + .callback = set_bios_reboot, + .ident = "HP Compaq Laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), + }, + }, + { } +}; +#endif + +static int __init reboot_init(void) +{ + const char *str; + + for ( str = reboot_str; *str != '\0'; str++ ) + { + switch ( *str ) + { + case 'n': /* no reboot */ + opt_noreboot = 1; + break; + case 'w': /* "warm" reboot (no memory testing etc) */ + reboot_mode = 0x1234; + break; + case 'c': /* "cold" reboot (with memory testing etc) */ + reboot_mode = 0x0; + break; +#ifndef reboot_thru_bios + case 'b': /* "bios" reboot by jumping through the BIOS */ + reboot_thru_bios = 1; + break; + case 'h': /* "hard" reboot by toggling RESET and/or crashing the CPU */ + reboot_thru_bios = -1; + break; +#endif + } + if ( (str = strchr(str, ',')) == NULL ) + break; + } + +#ifndef reboot_thru_bios + dmi_check_system(reboot_dmi_table); +#endif + return 0; +} +__initcall(reboot_init); diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/ipi.h --- /dev/null Wed Apr 5 23:59:06 2006 +++ b/xen/include/asm-x86/ipi.h Wed Apr 5 23:59:18 2006 @@ -0,0 +1,8 @@ +#ifndef __ASM_IPI_H +#define __ASM_IPI_H + +#include <asm/genapic.h> + +#define send_IPI_mask (genapic->send_IPI_mask) + +#endif /* __ASM_IPI_H */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/Post.mk --- a/xen/Post.mk Wed Apr 5 23:59:06 2006 +++ /dev/null Wed Apr 5 23:59:18 2006 @@ -1,27 +0,0 @@ -# Ensure each subdirectory has exactly one trailing slash. -subdir-n := $(patsubst %,%/,$(patsubst %/,%,$(subdir-n))) -subdir-y := $(patsubst %,%/,$(patsubst %/,%,$(subdir-y))) - -# Add explicitly declared subdirectories to the object list. -obj-y += $(patsubst %,%/built_in.o,$(subdir-y)) - -# Add implicitly declared subdirectories (in the object list) to the -# subdirectory list, and rewrite the object-list entry. 
-subdir-y += $(filter %/,$(obj-y)) -obj-y := $(patsubst %/,%/built-in.o,$(obj-y)) - -subdir-all := $(subdir-y) $(subdir-n) - -built_in.o: $(obj-y) - $(LD) $(LDFLAGS) -r -o $@ $^ - -.PHONY: FORCE -FORCE: - -%/built_in.o: FORCE - $(MAKE) -C $* - -clean:: $(addprefix _clean_, $(subdir-all)) FORCE - rm -f *.o *~ core -_clean_%/: FORCE - $(MAKE) -C $* clean diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/mach-bigsmp/mach_apic.h --- a/xen/include/asm-x86/mach-bigsmp/mach_apic.h Wed Apr 5 23:59:06 2006 +++ /dev/null Wed Apr 5 23:59:18 2006 @@ -1,138 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - - -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) -#define esr_disable (1) - -static inline int apic_id_registered(void) -{ - return (1); -} - -/* Round robin the irqs amoung the online cpus */ -static inline cpumask_t target_cpus(void) -{ - static unsigned long cpu = NR_CPUS; - do { - if (cpu >= NR_CPUS) - cpu = first_cpu(cpu_online_map); - else - cpu = next_cpu(cpu, cpu_online_map); - } while (cpu >= NR_CPUS); - return cpumask_of_cpu(cpu); -} - -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL 0 -#define TARGET_CPUS (target_cpus()) -#define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define INT_DELIVERY_MODE (dest_Fixed) -#define INT_DEST_MODE (0) /* phys delivery to target proc */ -#define NO_BALANCE_IRQ (0) -#define WAKE_SECONDARY_VIA_INIT - - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return (0); -} - -static inline unsigned long check_apicid_present(int bit) -{ - return (1); -} - -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long val, id; - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - id = xapic_phys_to_log_apicid(cpu); - val |= SET_APIC_LOGICAL_ID(id); - return val; -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = calculate_ldr(cpu); - apic_write_around(APIC_LDR, val); -} - -static inline void clustered_apic_check(void) -{ - printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", - "Physflat", nr_ioapics); -} - -static inline int apicid_to_node(int logical_apicid) -{ - return (0); -} - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; - - return BAD_APICID; -} - -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -extern u8 cpu_2_logical_apicid[]; -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ - if (cpu >= NR_CPUS) - return BAD_APICID; - return cpu_physical_id(cpu); -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xFFL); -} - -static inline void enable_apic_mode(void) -{ -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return (1); -} - -/* As we are using single CPU as destination, pick only one CPU here */ -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - int cpu; - int apicid; - - cpu = first_cpu(cpumask); - apicid = cpu_to_logical_apicid(cpu); - return apicid; -} - -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/mach-default/mach_apic.h --- a/xen/include/asm-x86/mach-default/mach_apic.h Wed Apr 5 23:59:06 2006 +++ /dev/null Wed Apr 5 23:59:18 2006 @@ -1,110 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include <asm/smp.h> - -#define APIC_DFR_VALUE (APIC_DFR_FLAT) - -static inline cpumask_t target_cpus(void) -{ -#ifdef CONFIG_SMP - return cpu_online_map; -#else - return cpumask_of_cpu(0); -#endif -} -#define TARGET_CPUS (target_cpus()) - -#define NO_BALANCE_IRQ (0) -#define esr_disable (0) - -#define INT_DELIVERY_MODE dest_LowestPrio -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return physid_isset(apicid, bitmap); -} - -static inline unsigned long check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LDR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void init_apic_ldr(void) -{ - unsigned long val; - - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); - apic_write_around(APIC_LDR, val); -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) -{ - return phys_map; -} - -static inline void clustered_apic_check(void) -{ - printk("Enabling APIC mode: %s. 
Using %d I/O APICs\n", - "Flat", nr_ioapics); -} - -static inline int apicid_to_node(int logical_apicid) -{ - return 0; -} - -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ - return 1 << cpu; -} - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < get_physical_broadcast()) - return mps_cpu; - else - return BAD_APICID; -} - -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) -{ - return physid_mask_of_physid(phys_apicid); -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map); -} - -static inline int apic_id_registered(void) -{ - return physid_isset(GET_APIC_ID(apic_read(APIC_ID)), phys_cpu_present_map); -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - return cpus_addr(cpumask)[0]; -} - -static inline void enable_apic_mode(void) -{ -} - -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/mach-es7000/mach_apic.h --- a/xen/include/asm-x86/mach-es7000/mach_apic.h Wed Apr 5 23:59:06 2006 +++ /dev/null Wed Apr 5 23:59:18 2006 @@ -1,185 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -extern u8 bios_cpu_apicid[]; - -#define xapic_phys_to_log_apicid(cpu) (bios_cpu_apicid[cpu]) -#define esr_disable (1) - -static inline int apic_id_registered(void) -{ - return (1); -} - -static inline cpumask_t target_cpus(void) -{ -#if defined CONFIG_ES7000_CLUSTERED_APIC - return CPU_MASK_ALL; -#else - return cpumask_of_cpu(smp_processor_id()); -#endif -} -#define TARGET_CPUS (target_cpus()) - -#if defined CONFIG_ES7000_CLUSTERED_APIC -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) -#define INT_DELIVERY_MODE (dest_LowestPrio) -#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ -#define NO_BALANCE_IRQ (1) -#undef WAKE_SECONDARY_VIA_INIT -#define WAKE_SECONDARY_VIA_MIP -#else -#define APIC_DFR_VALUE (APIC_DFR_FLAT) -#define INT_DELIVERY_MODE (dest_Fixed) -#define INT_DEST_MODE (0) /* phys delivery to target procs */ -#define NO_BALANCE_IRQ (0) -#undef APIC_DEST_LOGICAL -#define APIC_DEST_LOGICAL 0x0 -#define WAKE_SECONDARY_VIA_INIT -#endif - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} -static inline unsigned long check_apicid_present(int bit) -{ - return physid_isset(bit, phys_cpu_present_map); -} - -#define apicid_cluster(apicid) (apicid & 0xF0) - -static inline unsigned long calculate_ldr(int cpu) -{ - unsigned long id; - id = xapic_phys_to_log_apicid(cpu); - return (SET_APIC_LOGICAL_ID(id)); -} - -/* - * Set up the logical destination ID. - * - * Intel recommends to set DFR, LdR and TPR before enabling - * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel - * document number 292116). So here it goes... - */ -static inline void init_apic_ldr(void) -{ - unsigned long val; - int cpu = smp_processor_id(); - - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = calculate_ldr(cpu); - apic_write_around(APIC_LDR, val); -} - -extern void es7000_sw_apic(void); -static inline void enable_apic_mode(void) -{ - es7000_sw_apic(); - return; -} - -extern int apic_version [MAX_APICS]; -static inline void clustered_apic_check(void) -{ - int apic = bios_cpu_apicid[smp_processor_id()]; - printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", - (apic_version[apic] == 0x14) ? 
- "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); -} - -static inline int apicid_to_node(int logical_apicid) -{ - return 0; -} - - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (!mps_cpu) - return boot_cpu_physical_apicid; - else if (mps_cpu < NR_CPUS) - return (int) bios_cpu_apicid[mps_cpu]; - else - return BAD_APICID; -} - -static inline physid_mask_t apicid_to_cpu_present(int phys_apicid) -{ - static int id = 0; - physid_mask_t mask; - mask = physid_mask_of_physid(id); - ++id; - return mask; -} - -extern u8 cpu_2_logical_apicid[]; -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ - if (cpu >= NR_CPUS) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0xff); -} - -extern unsigned int boot_cpu_physical_apicid; -static inline int check_phys_apicid_present(int cpu_physical_apicid) -{ - boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); - return (1); -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - int num_bits_set; - int cpus_found = 0; - int cpu; - int apicid; - - num_bits_set = cpus_weight(cpumask); - /* Return id to all */ - if (num_bits_set == NR_CPUS) -#if defined CONFIG_ES7000_CLUSTERED_APIC - return 0xFF; -#else - return cpu_to_logical_apicid(0); -#endif - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. - */ - cpu = first_cpu(cpumask); - apicid = cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ - printk ("%s: Not a valid mask!\n",__FUNCTION__); -#if defined CONFIG_ES7000_CLUSTERED_APIC - return 0xFF; -#else - return cpu_to_logical_apicid(0); -#endif - } - apicid = new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return cpuid_apic >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/mach-summit/mach_apic.h --- a/xen/include/asm-x86/mach-summit/mach_apic.h Wed Apr 5 23:59:06 2006 +++ /dev/null Wed Apr 5 23:59:18 2006 @@ -1,167 +0,0 @@ -#ifndef __ASM_MACH_APIC_H -#define __ASM_MACH_APIC_H - -#include <xen/config.h> -#include <asm/smp.h> - -#define esr_disable (1) -#define NO_BALANCE_IRQ (0) - -/* In clustered mode, the high nibble of APIC ID is a cluster number. - * The low nibble is a 4-bit bitmap. */ -#define XAPIC_DEST_CPUS_SHIFT 4 -#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1) -#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT) - -#define APIC_DFR_VALUE (APIC_DFR_CLUSTER) - -static inline cpumask_t target_cpus(void) -{ - /* CPU_MASK_ALL (0xff) has undefined behaviour with - * dest_LowestPrio mode logical clustered apic interrupt routing - * Just start on cpu 0. 
IRQ balancing will spread load - */ - return cpumask_of_cpu(0); -} -#define TARGET_CPUS (target_cpus()) - -#define INT_DELIVERY_MODE (dest_LowestPrio) -#define INT_DEST_MODE 1 /* logical delivery broadcast to all procs */ - -static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) -{ - return 0; -} - -/* we don't use the phys_cpu_present_map to indicate apicid presence */ -static inline unsigned long check_apicid_present(int bit) -{ - return 1; -} - -#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK) - -extern u8 bios_cpu_apicid[]; -extern u8 cpu_2_logical_apicid[]; - -static inline void init_apic_ldr(void) -{ - unsigned long val, id; - int i, count; - u8 lid; - u8 my_id = (u8)hard_smp_processor_id(); - u8 my_cluster = (u8)apicid_cluster(my_id); - - /* Create logical APIC IDs by counting CPUs already in cluster. */ - for (count = 0, i = NR_CPUS; --i >= 0; ) { - lid = cpu_2_logical_apicid[i]; - if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster) - ++count; - } - /* We only have a 4 wide bitmap in cluster mode. If a deranged - * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */ - BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT); - id = my_cluster | (1UL << count); - apic_write_around(APIC_DFR, APIC_DFR_VALUE); - val = apic_read(APIC_LDR) & ~APIC_LDR_MASK; - val |= SET_APIC_LOGICAL_ID(id); - apic_write_around(APIC_LDR, val); -} - -static inline int apic_id_registered(void) -{ - return 1; -} - -static inline void clustered_apic_check(void) -{ - printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", - nr_ioapics); -} - -static inline int apicid_to_node(int logical_apicid) -{ - return logical_apicid >> 5; /* 2 clusterids per CEC */ -} - -/* Mapping from cpu number to logical apicid */ -static inline int cpu_to_logical_apicid(int cpu) -{ - if (cpu >= NR_CPUS) - return BAD_APICID; - return (int)cpu_2_logical_apicid[cpu]; -} - -static inline int cpu_present_to_apicid(int mps_cpu) -{ - if (mps_cpu < NR_CPUS) - return (int)bios_cpu_apicid[mps_cpu]; - else - return BAD_APICID; -} - -static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) -{ - /* For clustered we don't have a good way to do this yet - hack */ - return physids_promote(0x0F); -} - -static inline physid_mask_t apicid_to_cpu_present(int apicid) -{ - return physid_mask_of_physid(0); -} - -static inline int check_phys_apicid_present(int boot_cpu_physical_apicid) -{ - return 1; -} - -static inline void enable_apic_mode(void) -{ -} - -static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) -{ - int num_bits_set; - int cpus_found = 0; - int cpu; - int apicid; - - num_bits_set = cpus_weight(cpumask); - /* Return id to all */ - if (num_bits_set == NR_CPUS) - return (int) 0xFF; - /* - * The cpus in the mask must all be on the apic cluster. If are not - * on the same apicid cluster return default value of TARGET_CPUS. - */ - cpu = first_cpu(cpumask); - apicid = cpu_to_logical_apicid(cpu); - while (cpus_found < num_bits_set) { - if (cpu_isset(cpu, cpumask)) { - int new_apicid = cpu_to_logical_apicid(cpu); - if (apicid_cluster(apicid) != - apicid_cluster(new_apicid)){ - printk ("%s: Not a valid mask!\n",__FUNCTION__); - return 0xFF; - } - apicid = apicid | new_apicid; - cpus_found++; - } - cpu++; - } - return apicid; -} - -/* cpuid returns the value latched in the HW at reset, not the APIC ID - * register's value. For any box whose BIOS changes APIC IDs, like - * clustered APIC systems, we must use hard_smp_processor_id. 
- * - * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. - */ -static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) -{ - return hard_smp_processor_id() >> index_msb; -} - -#endif /* __ASM_MACH_APIC_H */ diff -r 09967f2d6e3b -r ece9b5710b29 xen/include/asm-x86/mach_ipi.h --- a/xen/include/asm-x86/mach_ipi.h Wed Apr 5 23:59:06 2006 +++ /dev/null Wed Apr 5 23:59:18 2006 @@ -1,11 +0,0 @@ -#ifndef __ASM_MACH_IPI_H -#define __ASM_MACH_IPI_H - -#include <asm/genapic.h> - -void send_IPI_mask_bitmask(cpumask_t mask, int vector); -void send_IPI_mask_sequence(cpumask_t mask, int vector); - -#define send_IPI_mask (genapic->send_ipi_mask) - -#endif /* __ASM_MACH_IPI_H */ _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx http://lists.xensource.com/xen-changelog
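The event_channel.h/xen.h hunks above split VIRQs into per-VCPU and global classes. A minimal guest-side sketch of both binding cases follows; it assumes the evtchn_op_t / HYPERVISOR_event_channel_op(void *) interface of this era and the usual evtchn_bind_virq/evtchn_bind_vcpu field layout, so the exact names and header paths should be checked against xen/include/public/event_channel.h rather than taken as given.

#include <xen/interface/xen.h>            /* VIRQ_* (include path varies by guest tree) */
#include <xen/interface/event_channel.h>  /* evtchn_op_t, EVTCHNOP_*, assumed layout */
/* HYPERVISOR_event_channel_op() is supplied by the guest's hypercall.h wrapper. */

/* Per-VCPU VIRQ: bound on the VCPU that will receive it; the binding cannot move. */
static int bind_timer_virq(unsigned int vcpu)
{
    evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };

    op.u.bind_virq.virq = VIRQ_TIMER;
    op.u.bind_virq.vcpu = vcpu;
    if (HYPERVISOR_event_channel_op(&op) != 0)
        return -1;
    return op.u.bind_virq.port;
}

/* Global VIRQ: must first be allocated on VCPU0, then may be re-targeted. */
static int bind_dom_exc_virq(unsigned int target_vcpu)
{
    evtchn_op_t op = { .cmd = EVTCHNOP_bind_virq };
    int port;

    op.u.bind_virq.virq = VIRQ_DOM_EXC;
    op.u.bind_virq.vcpu = 0;
    if (HYPERVISOR_event_channel_op(&op) != 0)
        return -1;
    port = op.u.bind_virq.port;

    op.cmd = EVTCHNOP_bind_vcpu;          /* optional re-bind to another VCPU */
    op.u.bind_vcpu.port = port;
    op.u.bind_vcpu.vcpu = target_vcpu;
    (void)HYPERVISOR_event_channel_op(&op);

    return port;
}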
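The genapic.h changes reduce the old per-subarch APIC vtable to a delivery-mode choice (GENAPIC_FLAT vs GENAPIC_PHYS) backed by the new genapic/delivery.c helpers, with IPIs dispatched through asm-x86/ipi.h. A rough sketch of assembling a genapic instance from those initialisers is below; the instance and probe names are invented for illustration, and real instances additionally supply mps_oem_check/acpi_madt_oem_check via APIC_INIT.

#include <xen/cpumask.h>
#include <asm/genapic.h>
#include <asm/ipi.h>        /* send_IPI_mask expands to genapic->send_IPI_mask */

static int probe_flat_example(void)
{
    return 1;               /* pretend this mode always applies */
}

static struct genapic apic_flat_example = {
    .name  = "flat-example",
    .probe = probe_flat_example,
    GENAPIC_FLAT            /* logical, lowest-priority delivery helpers */
};

/* Once selected, every IPI goes through the flat delivery helpers. */
static void use_flat_delivery_example(void)
{
    cpumask_t mask = cpumask_of_cpu(0);

    genapic = &apic_flat_example;
    send_IPI_mask(mask, 0xfb /* example vector only */);
}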
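For the new shutdown.c command-line handling, the options parsed by reboot_init() and the "noreboot" boolean map onto the hypervisor command line roughly as sketched below. The GRUB entries are illustrative only (paths, dom0 module lines and other options depend on the installation), and 'b'/'h' are only meaningful on i386, where reboot_thru_bios exists.

title Xen (warm reboot through the BIOS vector)
    kernel /boot/xen.gz reboot=w,b

title Xen (cold reboot via the keyboard controller)
    kernel /boot/xen.gz reboot=c

title Xen (halt on error instead of rebooting)
    kernel /boot/xen.gz noreboot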