
[Xen-changelog] Linux 2.6 cleanups.



# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID 6fa6c392d258d827a79a6d2a70f55af74d3545fa
# Parent  6d4c0bfc3c1c15d1871d17698bfd78a9ea05aff5
Linux 2.6 cleanups.
Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>

diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/Makefile    Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/Makefile    Thu Sep  1 08:25:22 2005
@@ -65,6 +65,7 @@
 
 XINSTALL_NAME ?= $(KERNELRELEASE)
 install: vmlinuz
+install kernel_install:
        mkdir -p $(INSTALL_PATH)/boot
        ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_PATH)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
        rm -f $(INSTALL_PATH)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/Kconfig
--- a/linux-2.6-xen-sparse/arch/xen/i386/Kconfig        Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/Kconfig        Thu Sep  1 08:25:22 2005
@@ -379,18 +379,18 @@
          If you don't know what to do here, say N.
 
 config SMP_ALTERNATIVES
-        bool "SMP alternatives support (EXPERIMENTAL)"
-        depends on SMP && EXPERIMENTAL
-        help
-          Try to reduce the overhead of running an SMP kernel on a uniprocessor
-          host slightly by replacing certain key instruction sequences
-          according to whether we currently have more than one CPU available.
-          This should provide a noticeable boost to performance when
-          running SMP kernels on UP machines, and have negligible impact
-          when running on a true SMP host.
+       bool "SMP alternatives support (EXPERIMENTAL)"
+       depends on SMP && EXPERIMENTAL
+       help
+         Try to reduce the overhead of running an SMP kernel on a uniprocessor
+         host slightly by replacing certain key instruction sequences
+         according to whether we currently have more than one CPU available.
+         This should provide a noticeable boost to performance when
+         running SMP kernels on UP machines, and have negligible impact
+         when running on a true SMP host.
 
           If unsure, say N.
-
+         
 config NR_CPUS
        int "Maximum number of CPUs (2-255)"
        range 2 255
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile        Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/Makefile        Thu Sep  1 08:25:22 2005
@@ -5,6 +5,7 @@
 XENARCH        := $(subst ",,$(CONFIG_XENARCH))
 
 CFLAGS += -Iarch/$(XENARCH)/kernel
+AFLAGS += -Iarch/$(XENARCH)/kernel
 
 extra-y := head.o init_task.o
 
@@ -32,7 +33,7 @@
 obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o
 c-obj-$(CONFIG_X86_LOCAL_APIC) += nmi.o
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
-c-obj-$(CONFIG_X86_REBOOTFIXUPS)+= reboot_fixups.o
+c-obj-$(CONFIG_X86_REBOOTFIXUPS)       += reboot_fixups.o
 c-obj-$(CONFIG_X86_NUMAQ)      += numaq.o
 c-obj-$(CONFIG_X86_SUMMIT_NUMA)        += summit.o
 c-obj-$(CONFIG_MODULES)                += module.o
@@ -69,7 +70,7 @@
 
 $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
 $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
-                     $(obj)/vsyscall-%.o FORCE
+                     $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
        $(call if_changed,syscall)
 
 # We also create a special relocatable object that should mirror the symbol
@@ -81,20 +82,17 @@
 
 SYSCFLAGS_vsyscall-syms.o = -r
 $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
-                       $(obj)/vsyscall-sysenter.o FORCE
+                       $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
        $(call if_changed,syscall)
 
 c-link :=
-s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o syscall_table.o
+s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o vsyscall.lds.o vsyscall-note.o
 
 $(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-obj-m) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
        @ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
 
 $(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
 
-EXTRA_AFLAGS   += -I$(obj)
-$(obj)/entry.o: $(src)/entry.S $(src)/syscall_table.S
-
 obj-y  += $(c-obj-y) $(s-obj-y)
 obj-m  += $(c-obj-m)
 
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/entry.S Thu Sep  1 08:25:22 2005
@@ -47,6 +47,7 @@
 #include <asm/segment.h>
 #include <asm/smp.h>
 #include <asm/page.h>
+#include <asm/desc.h>
 #include "irq_vectors.h"
 #include <asm-xen/xen-public/xen.h>
 
@@ -112,7 +113,7 @@
                                XEN_BLOCK_EVENTS(%esi)
 #else
 #define preempt_stop
-#define resume_kernel          restore_all
+#define resume_kernel          restore_nocheck
 #endif
 
 #define SAVE_ALL \
@@ -161,11 +162,9 @@
        addl $4, %esp;  \
 1:     iret;           \
 .section .fixup,"ax";   \
-2:     movl $(__USER_DS), %edx; \
-       movl %edx, %ds; \
-       movl %edx, %es; \
-       movl $11,%eax;  \
-       call do_exit;   \
+2:     pushl $0;       \
+       pushl $do_iret_error;   \
+       jmp error_code; \
 .previous;             \
 .section __ex_table,"a";\
        .align 4;       \
@@ -196,7 +195,7 @@
        movl EFLAGS(%esp), %eax         # mix EFLAGS and CS
        movb CS(%esp), %al
        testl $(VM_MASK | 2), %eax
-       jz resume_kernel                # returning to kernel or vm86-space
+       jz resume_kernel
 ENTRY(resume_userspace)
        XEN_BLOCK_EVENTS(%esi)          # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
@@ -211,7 +210,7 @@
 ENTRY(resume_kernel)
        XEN_BLOCK_EVENTS(%esi)
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
-       jnz restore_all
+       jnz restore_nocheck
 need_resched:
        movl TI_flags(%ebp), %ecx       # need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
@@ -252,7 +251,8 @@
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
 
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
+       testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
@@ -276,7 +276,8 @@
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
                                        # system call tracing in operation
-       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
+       testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
@@ -290,7 +291,20 @@
        movl TI_flags(%ebp), %ecx
        testw $_TIF_ALLWORK_MASK, %cx   # current->work
        jne syscall_exit_work
+
 restore_all:
+#if 0 /* XEN */
+       movl EFLAGS(%esp), %eax         # mix EFLAGS, SS and CS
+       # Warning: OLDSS(%esp) contains the wrong/random values if we
+       # are returning to the kernel.
+       # See comments in process.c:copy_thread() for details.
+       movb OLDSS(%esp), %ah
+       movb CS(%esp), %al
+       andl $(VM_MASK | (4 << 8) | 3), %eax
+       cmpl $((4 << 8) | 3), %eax
+       je ldt_ss                       # returning to user-space with LDT SS
+#endif /* XEN */
+restore_nocheck:
        testl $VM_MASK, EFLAGS(%esp)
        jnz resume_vm86
        movb EVENT_MASK(%esp), %al
@@ -300,7 +314,19 @@
        andb $1,%al                     # %al == mask & ~saved_mask
        jnz restore_all_enable_events   #     != 0 => reenable event delivery
        XEN_PUT_VCPU_INFO(%esi)
-       RESTORE_ALL
+       RESTORE_REGS
+       addl $4, %esp
+1:     iret
+.section .fixup,"ax"
+iret_exc:
+       pushl $0                        # no error code
+       pushl $do_iret_error
+       jmp error_code
+.previous
+.section __ex_table,"a"
+       .align 4
+       .long 1b,iret_exc
+.previous
 
 resume_vm86:
        XEN_UNBLOCK_EVENTS(%esi)
@@ -309,6 +335,33 @@
        movl $__HYPERVISOR_switch_vm86,%eax
        int $0x82
        ud2
+
+#if 0 /* XEN */
+ldt_ss:
+       larl OLDSS(%esp), %eax
+       jnz restore_nocheck
+       testl $0x00400000, %eax         # returning to 32bit stack?
+       jnz restore_nocheck             # allright, normal return
+       /* If returning to userspace with 16bit stack,
+        * try to fix the higher word of ESP, as the CPU
+        * won't restore it.
+        * This is an "official" bug of all the x86-compatible
+        * CPUs, which we can try to work around to make
+        * dosemu and wine happy. */
+       subl $8, %esp           # reserve space for switch16 pointer
+       cli
+       movl %esp, %eax
+       /* Set up the 16bit stack frame with switch32 pointer on top,
+        * and a switch16 pointer on top of the current frame. */
+       call setup_x86_bogus_stack
+       RESTORE_REGS
+       lss 20+4(%esp), %esp    # switch to 16bit stack
+1:     iret
+.section __ex_table,"a"
+       .align 4
+       .long 1b,iret_exc
+.previous
+#endif /* XEN */
 
        # perform work that needs to be done immediately before resumption
        ALIGN
@@ -385,6 +438,27 @@
        jmp resume_userspace
 
 #if 0 /* XEN */
+#define FIXUP_ESPFIX_STACK \
+       movl %esp, %eax; \
+       /* switch to 32bit stack using the pointer on top of 16bit stack */ \
+       lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
+       /* copy data from 16bit stack to 32bit stack */ \
+       call fixup_x86_bogus_stack; \
+       /* put ESP to the proper location */ \
+       movl %eax, %esp;
+#define UNWIND_ESPFIX_STACK \
+       pushl %eax; \
+       movl %ss, %eax; \
+       /* see if on 16bit stack */ \
+       cmpw $__ESPFIX_SS, %ax; \
+       jne 28f; \
+       movl $__KERNEL_DS, %edx; \
+       movl %edx, %ds; \
+       movl %edx, %es; \
+       /* switch to 32bit stack */ \
+       FIXUP_ESPFIX_STACK \
+28:    popl %eax;
+
 /*
  * Build the entry stubs and pointer table with
  * some assembler magic.
@@ -440,7 +514,9 @@
        pushl %ecx
        pushl %ebx
        cld
-       movl %es, %ecx
+       pushl %es
+#      UNWIND_ESPFIX_STACK
+       popl %ecx
        movl ES(%esp), %edi             # get the function address
        movl ORIG_EAX(%esp), %edx       # get the error code
        movl %eax, ORIG_EAX(%esp)
@@ -625,6 +701,11 @@
  * fault happened on the sysenter path.
  */
 ENTRY(nmi)
+       pushl %eax
+       movl %ss, %eax
+       cmpw $__ESPFIX_SS, %ax
+       popl %eax
+       je nmi_16bit_stack
        cmpl $sysenter_entry,(%esp)
        je nmi_stack_fixup
        pushl %eax
@@ -644,7 +725,7 @@
        xorl %edx,%edx          # zero error code
        movl %esp,%eax          # pt_regs pointer
        call do_nmi
-       RESTORE_ALL
+       jmp restore_all
 
 nmi_stack_fixup:
        FIX_STACK(12,nmi_stack_correct, 1)
@@ -659,6 +740,29 @@
 nmi_debug_stack_fixup:
        FIX_STACK(24,nmi_stack_correct, 1)
        jmp nmi_stack_correct
+
+nmi_16bit_stack:
+       /* create the pointer to lss back */
+       pushl %ss
+       pushl %esp
+       movzwl %sp, %esp
+       addw $4, (%esp)
+       /* copy the iret frame of 12 bytes */
+       .rept 3
+       pushl 16(%esp)
+       .endr
+       pushl %eax
+       SAVE_ALL
+       FIXUP_ESPFIX_STACK              # %eax == %esp
+       xorl %edx,%edx                  # zero error code
+       call do_nmi
+       RESTORE_REGS
+       lss 12+4(%esp), %esp            # back to 16bit stack
+1:     iret
+.section __ex_table,"a"
+       .align 4
+       .long 1b,iret_exc
+.previous
 #endif /* XEN */
 
 ENTRY(int3)
@@ -725,7 +829,9 @@
        pushl %ecx
        pushl %ebx
        cld
-       movl %es,%edi
+       pushl %es
+#      UNWIND_ESPFIX_STACK
+       popl %edi
        movl ES(%esp), %ecx             /* get the faulting address */
        movl ORIG_EAX(%esp), %edx       /* get the error code */
        movl %eax, ORIG_EAX(%esp)
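
The testw changes above matter because _TIF_SECCOMP sits in bit 8 of the thread-info flags, one bit beyond the low byte that a testb instruction can examine. A minimal user-space C sketch of the same masking point (the flag values below are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only, not the kernel's real TIF_* definitions. */
#define DEMO_TIF_SYSCALL_TRACE  (1u << 0)
#define DEMO_TIF_SYSCALL_AUDIT  (1u << 7)
#define DEMO_TIF_SECCOMP        (1u << 8)   /* bit 8 lies outside the low byte */

int main(void)
{
    uint32_t flags = DEMO_TIF_SECCOMP;   /* only the seccomp flag is set */
    uint32_t mask  = DEMO_TIF_SYSCALL_TRACE | DEMO_TIF_SYSCALL_AUDIT | DEMO_TIF_SECCOMP;

    /* testb-style check: only the low 8 bits are visible, so bit 8 is missed. */
    int byte_test = ((flags & mask) & 0xffu) != 0;
    /* testw-style check: the low 16 bits are visible, so bit 8 is seen. */
    int word_test = ((flags & mask) & 0xffffu) != 0;

    printf("8-bit test sees work: %d, 16-bit test sees work: %d\n",
           byte_test, word_test);
    return 0;
}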
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S  Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/head.S  Thu Sep  1 08:25:22 2005
@@ -179,7 +179,7 @@
        .quad 0x0000000000000000        /* 0xc0 APM CS 16 code (16 bit) */
        .quad 0x0000000000000000        /* 0xc8 APM DS    data */
 
-       .quad 0x0000000000000000        /* 0xd0 - unused */
+       .quad 0x0000000000000000        /* 0xd0 - ESPFIX 16-bit SS */
        .quad 0x0000000000000000        /* 0xd8 - unused */
        .quad 0x0000000000000000        /* 0xe0 - unused */
        .quad 0x0000000000000000        /* 0xe8 - unused */
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c   Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/irq.c   Thu Sep  1 08:25:22 2005
@@ -242,12 +242,12 @@
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_cpu(j)
-                       seq_printf(p, "%10u ", nmi_count(j));
+                       seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "LOC: ");
                for_each_cpu(j)
-                       seq_printf(p, "%10u ", per_cpu(irq_stat, j).apic_timer_irqs);
+                       seq_printf(p, "%10u ", per_cpu(irq_stat,j).apic_timer_irqs);
                seq_putc(p, '\n');
 #endif
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -263,6 +263,7 @@
 void fixup_irqs(cpumask_t map)
 {
        unsigned int irq;
+       static int warned;
 
        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
@@ -276,7 +277,7 @@
                }
                if (irq_desc[irq].handler->set_affinity)
                        irq_desc[irq].handler->set_affinity(irq, mask);
-               else if (irq_desc[irq].action)
+               else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }
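
The fixup_irqs() hunk above limits the "Cannot set affinity" message to a single warning by testing and incrementing a function-local static counter. A small stand-alone C sketch of that warn-once idiom (the function name here is hypothetical):

#include <stdio.h>

/* Hypothetical stand-in for the per-IRQ reporting done in fixup_irqs(). */
static void report_unmovable_irq(int irq)
{
    static int warned;              /* zero-initialised, persists across calls */

    if (!(warned++))                /* true only on the very first call */
        printf("Cannot set affinity for irq %i\n", irq);
}

int main(void)
{
    int irq;

    for (irq = 0; irq < 4; irq++)
        report_unmovable_irq(irq);  /* prints exactly one warning in total */
    return 0;
}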
 
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/process.c       Thu Sep  1 08:25:22 2005
@@ -467,7 +467,6 @@
        return 1;
 }
 
-
 /*
  *     switch_to(x,yn) should switch tasks from x to y.
  *
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/setup.c Thu Sep  1 08:25:22 2005
@@ -1078,12 +1078,12 @@
 void __init setup_bootmem_allocator(void);
 static unsigned long __init setup_memory(void)
 {
-
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
-       min_low_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
+       min_low_pfn = PFN_UP(__pa(xen_start_info.pt_base)) +
+               xen_start_info.nr_pt_frames;
 
        find_max_pfn();
 
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c       Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c       Thu Sep  1 08:25:22 2005
@@ -856,9 +856,6 @@
        cpu_gdt_descr[cpu].address = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        BUG_ON(cpu_gdt_descr[0].size > PAGE_SIZE);
        cpu_gdt_descr[cpu].size = cpu_gdt_descr[0].size;
-       printk("GDT: copying %d bytes from %lx to %lx\n",
-               cpu_gdt_descr[0].size, cpu_gdt_descr[0].address,
-               cpu_gdt_descr[cpu].address); 
        memcpy((void *)cpu_gdt_descr[cpu].address,
               (void *)cpu_gdt_descr[0].address, cpu_gdt_descr[0].size);
 
@@ -1274,6 +1271,7 @@
                        printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
                        smp_num_siblings = siblings;
                }
+
                if (c->x86_num_cores > 1) {
                        for (i = 0; i < NR_CPUS; i++) {
                                if (!cpu_isset(i, cpu_callout_map))
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/traps.c Thu Sep  1 08:25:22 2005
@@ -449,10 +449,10 @@
 DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
 DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
 DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
-DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
 #ifdef CONFIG_X86_MCE
 DO_ERROR(18, SIGBUS, "machine check", machine_check)
 #endif
+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
 
 fastcall void do_general_protection(struct pt_regs * regs, long error_code)
 {
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Thu Sep  1 08:25:22 2005
@@ -19,37 +19,122 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 
-#ifndef CONFIG_XEN_PHYSDEV_ACCESS
-
-void * __ioremap(unsigned long phys_addr, unsigned long size,
-                unsigned long flags)
-{
-       return NULL;
-}
-
-void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-       return NULL;
-}
-
-void iounmap(volatile void __iomem *addr)
-{
-}
-
-#ifdef __i386__
-
-void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
-{
-       return NULL;
-}
-
-void __init bt_iounmap(void *addr, unsigned long size)
-{
-}
-
-#endif /* __i386__ */
-
-#else
+#define ISA_START_ADDRESS      0x0
+#define ISA_END_ADDRESS                0x100000
+
+/* These hacky macros avoid phys->machine translations. */
+#define __direct_pte(x) ((pte_t) { (x) } )
+#define __direct_mk_pte(page_nr,pgprot) \
+  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
+#define direct_mk_pte_phys(physpage, pgprot) \
+  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
+
+
+static int direct_remap_area_pte_fn(pte_t *pte, 
+                                   struct page *pte_page,
+                                   unsigned long address, 
+                                   void *data)
+{
+       mmu_update_t **v = (mmu_update_t **)data;
+
+       (*v)->ptr = ((maddr_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
+                    PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
+       (*v)++;
+
+       return 0;
+}
+
+int direct_remap_area_pages(struct mm_struct *mm,
+                           unsigned long address, 
+                           unsigned long machine_addr,
+                           unsigned long size, 
+                           pgprot_t prot,
+                           domid_t  domid)
+{
+       int i;
+       unsigned long start_address;
+#define MAX_DIRECTMAP_MMU_QUEUE 130
+       mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u, *w = u;
+
+       start_address = address;
+
+       flush_cache_all();
+
+       for (i = 0; i < size; i += PAGE_SIZE) {
+               if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
+                       /* Fill in the PTE pointers. */
+                       generic_page_range(mm, start_address, 
+                                          address - start_address,
+                                          direct_remap_area_pte_fn, &w);
+                       w = u;
+                       if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
+                               return -EFAULT;
+                       v = u;
+                       start_address = address;
+               }
+
+               /*
+                * Fill in the machine address: PTE ptr is done later by
+                * __direct_remap_area_pages(). 
+                */
+               v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
+
+               machine_addr += PAGE_SIZE;
+               address += PAGE_SIZE; 
+               v++;
+       }
+
+       if (v != u) {
+               /* get the ptep's filled in */
+               generic_page_range(mm, start_address, address - start_address,
+                                  direct_remap_area_pte_fn, &w);
+               if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
+                       return -EFAULT;
+       }
+
+       flush_tlb_all();
+
+       return 0;
+}
+
+EXPORT_SYMBOL(direct_remap_area_pages);
+
+static int lookup_pte_fn(
+       pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
+{
+       unsigned long *ptep = (unsigned long *)data;
+       if (ptep)
+               *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) <<
+                        PAGE_SHIFT) |
+                       ((unsigned long)pte & ~PAGE_MASK);
+       return 0;
+}
+
+int create_lookup_pte_addr(struct mm_struct *mm, 
+                          unsigned long address,
+                          unsigned long *ptep)
+{
+       return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
+}
+
+EXPORT_SYMBOL(create_lookup_pte_addr);
+
+static int noop_fn(
+       pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
+{
+       return 0;
+}
+
+int touch_pte_range(struct mm_struct *mm,
+                   unsigned long address,
+                   unsigned long size)
+{
+       return generic_page_range(mm, address, size, noop_fn, NULL);
+} 
+
+EXPORT_SYMBOL(touch_pte_range);
+
+#ifdef CONFIG_XEN_PHYSDEV_ACCESS
 
 /*
  * Does @address reside within a non-highmem page that is local to this virtual
@@ -90,13 +175,12 @@
        if (!size || last_addr < phys_addr)
                return NULL;
 
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
-       if (phys_addr >= 0x0 && last_addr < 0x100000)
-               return isa_bus_to_virt(phys_addr);
-#endif
+       if (xen_start_info.flags & SIF_PRIVILEGED &&
+           phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+               return (void __iomem *) isa_bus_to_virt(phys_addr);
 
        /*
         * Don't allow anybody to remap normal RAM that we're using..
@@ -203,24 +287,32 @@
 {
        struct vm_struct *p;
        if ((void __force *) addr <= high_memory) 
-               return; 
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
+               return;
+
+       /*
+        * __ioremap special-cases the PCI/ISA range by not instantiating a
+        * vm_area and by simply returning an address into the kernel mapping
+        * of ISA space.   So handle that here.
+        */
        if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
                return;
-#endif
-       p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+
+       write_lock(&vmlist_lock);
+       p = __remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
        if (!p) { 
-               printk("__iounmap: bad address %p\n", addr);
-               return;
+               printk("iounmap: bad address %p\n", addr);
+               goto out_unlock;
        }
 
        if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
                /* p->size includes the guard page, but cpa doesn't like that */
                change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
                                 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
-                                PAGE_KERNEL);                           
+                                PAGE_KERNEL);
                global_flush_tlb();
        } 
+out_unlock:
+       write_unlock(&vmlist_lock);
        kfree(p); 
 }
 
@@ -237,13 +329,12 @@
        if (!size || last_addr < phys_addr)
                return NULL;
 
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
-       if (phys_addr >= 0x0 && last_addr < 0x100000)
+       if (xen_start_info.flags & SIF_PRIVILEGED &&
+           phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return isa_bus_to_virt(phys_addr);
-#endif
 
        /*
         * Mappings have to be page-aligned
@@ -282,10 +373,8 @@
        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
                return;
-#ifdef CONFIG_XEN_PRIVILEGED_GUEST
        if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
                return;
-#endif
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
 
@@ -299,119 +388,37 @@
 
 #endif /* __i386__ */
 
+#else /* CONFIG_XEN_PHYSDEV_ACCESS */
+
+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size,
+                        unsigned long flags)
+{
+       return NULL;
+}
+
+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+{
+       return NULL;
+}
+
+void iounmap(volatile void __iomem *addr)
+{
+}
+
+#ifdef __i386__
+
+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
+{
+       return NULL;
+}
+
+void __init bt_iounmap(void *addr, unsigned long size)
+{
+}
+
+#endif /* __i386__ */
+
 #endif /* CONFIG_XEN_PHYSDEV_ACCESS */
-
-/* These hacky macros avoid phys->machine translations. */
-#define __direct_pte(x) ((pte_t) { (x) } )
-#define __direct_mk_pte(page_nr,pgprot) \
-  __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
-#define direct_mk_pte_phys(physpage, pgprot) \
-  __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
-
-static int direct_remap_area_pte_fn(pte_t *pte, 
-                                   struct page *pte_page,
-                                   unsigned long address, 
-                                   void *data)
-{
-       mmu_update_t **v = (mmu_update_t **)data;
-
-       (*v)->ptr = ((maddr_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
-                    PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
-       (*v)++;
-
-       return 0;
-}
-
-int direct_remap_area_pages(struct mm_struct *mm,
-                           unsigned long address, 
-                           unsigned long machine_addr,
-                           unsigned long size, 
-                           pgprot_t prot,
-                           domid_t  domid)
-{
-       int i;
-       unsigned long start_address;
-#define MAX_DIRECTMAP_MMU_QUEUE 130
-       mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *v = u, *w = u;
-
-       start_address = address;
-
-       flush_cache_all();
-
-       for (i = 0; i < size; i += PAGE_SIZE) {
-               if ((v - u) == MAX_DIRECTMAP_MMU_QUEUE) {
-                       /* Fill in the PTE pointers. */
-                       generic_page_range(mm, start_address, 
-                                          address - start_address,
-                                          direct_remap_area_pte_fn, &w);
-                       w = u;
-                       if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
-                               return -EFAULT;
-                       v = u;
-                       start_address = address;
-               }
-
-               /*
-                * Fill in the machine address: PTE ptr is done later by
-                * __direct_remap_area_pages(). 
-                */
-       v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
-
-               machine_addr += PAGE_SIZE;
-               address += PAGE_SIZE; 
-               v++;
-       }
-
-       if (v != u) {
-               /* get the ptep's filled in */
-               generic_page_range(mm, start_address, address - start_address,
-                                  direct_remap_area_pte_fn, &w);
-               if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
-                       return -EFAULT;
-       }
-
-       flush_tlb_all();
-
-       return 0;
-}
-
-EXPORT_SYMBOL(direct_remap_area_pages);
-
-static int lookup_pte_fn(
-       pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-{
-       unsigned long *ptep = (unsigned long *)data;
-       if (ptep)
-               *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) <<
-                        PAGE_SHIFT) |
-                       ((unsigned long)pte & ~PAGE_MASK);
-       return 0;
-}
-
-int create_lookup_pte_addr(struct mm_struct *mm, 
-                          unsigned long address,
-                          unsigned long *ptep)
-{
-       return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
-}
-
-EXPORT_SYMBOL(create_lookup_pte_addr);
-
-static int noop_fn(
-       pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
-{
-       return 0;
-}
-
-int touch_pte_range(struct mm_struct *mm,
-                   unsigned long address,
-                   unsigned long size)
-{
-       return generic_page_range(mm, address, size, noop_fn, NULL);
-} 
-
-EXPORT_SYMBOL(touch_pte_range);
 
 /*
  * Local variables:
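
The relocated direct_remap_area_pages() above queues PTE updates in a fixed-size mmu_update_t array and pushes them to the hypervisor in batches: it flushes with one HYPERVISOR_mmu_update call whenever the queue fills, and once more at the end for any remainder. A stand-alone C sketch of that batch-and-flush structure (the update type, queue size, and flush function below are placeholders, not the Xen interface):

#include <stdio.h>
#include <stddef.h>

#define MAX_QUEUE 4   /* placeholder; the patch uses MAX_DIRECTMAP_MMU_QUEUE (130) */

struct update { unsigned long ptr, val; };   /* placeholder for mmu_update_t */

/* Placeholder for HYPERVISOR_mmu_update(): just reports the batch size. */
static int flush_updates(const struct update *u, size_t count)
{
    printf("flushing %zu queued updates\n", count);
    return 0;
}

int main(void)
{
    struct update u[MAX_QUEUE], *v = u;
    unsigned long machine_addr = 0x100000;
    int i;

    for (i = 0; i < 10; i++) {                   /* ten pages to map */
        if ((size_t)(v - u) == MAX_QUEUE) {      /* queue full: flush and start over */
            if (flush_updates(u, v - u) < 0)
                return 1;
            v = u;
        }
        v->ptr = 0;                              /* PTE pointer is filled in later */
        v->val = machine_addr;                   /* new PTE value for this page */
        machine_addr += 4096;
        v++;
    }

    if (v != u)                                  /* flush whatever is left over */
        return flush_updates(u, v - u) < 0;
    return 0;
}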
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig      Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/Kconfig      Thu Sep  1 08:25:22 2005
@@ -21,12 +21,12 @@
          classical 32-bit x86 architecture. For details see
          <http://www.x86-64.org/>.
 
-config X86
-       bool
-       default y
-
 config 64BIT
        def_bool y
+
+config X86
+       bool
+       default y
 
 config MMU
        bool
@@ -89,10 +89,11 @@
 #        Optimize for AMD Opteron/Athlon64/Hammer/K8 CPUs.
 
 config MPSC
-       bool "Intel x86-64"
+       bool "Intel EM64T"
        help
-         Optimize for Intel IA32 with 64bit extension CPUs
-         (Prescott/Nocona/Potomac)
+         Optimize for Intel Pentium 4 and Xeon CPUs with Intel
+         Extended Memory 64 Technology (EM64T). For details see
+         <http://www.intel.com/technology/64bitextensions/>.
 
 config GENERIC_CPU
        bool "Generic-x86-64"
@@ -367,7 +368,6 @@
 
          If unsure, say Y. Only embedded should say N here.
 
-
 endmenu
 
 #
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/drivers/char/mem.c
--- a/linux-2.6-xen-sparse/drivers/char/mem.c   Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/drivers/char/mem.c   Thu Sep  1 08:25:22 2005
@@ -231,7 +231,7 @@
 }
 #endif
 
-static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 {
 #if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
@@ -258,7 +258,6 @@
        return 0;
 }
 
-#if 0
 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 {
         unsigned long long val;
@@ -275,7 +274,6 @@
        vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
        return mmap_mem(file, vma);
 }
-#endif
 
 extern long vread(char *buf, char *addr, unsigned long count);
 extern long vwrite(char *buf, char *addr, unsigned long count);
@@ -731,7 +729,7 @@
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
-       .mmap           = mmap_kmem,
+       .mmap           = mmap_mem,
        .open           = open_mem,
 };
 #else
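
The mem.c change restores /dev/kmem's mmap as its own function again: it converts the kernel-virtual target into a physical page offset with __pa() and then reuses the /dev/mem path, while /dev/mem itself goes back to mmap_mem(). A stand-alone C sketch of that convert-then-delegate shape (the names, page size, and offset value are illustrative, not the kernel's):

#include <stdio.h>

#define DEMO_PAGE_SHIFT   12
#define DEMO_PAGE_OFFSET  0xc0000000UL   /* illustrative i386-style lowmem base */

/* Stand-in for mmap_mem(): report which physical page frame would be mapped. */
static int demo_mmap_mem(unsigned long pgoff)
{
    printf("mapping physical page frame %#lx\n", pgoff);
    return 0;
}

/* Stand-in for mmap_kmem(): turn a kernel-virtual address into a physical
 * page frame (a crude __pa(): vaddr minus the lowmem base) and delegate to
 * the /dev/mem path. */
static int demo_mmap_kmem(unsigned long kernel_vaddr)
{
    unsigned long paddr = kernel_vaddr - DEMO_PAGE_OFFSET;

    return demo_mmap_mem(paddr >> DEMO_PAGE_SHIFT);
}

int main(void)
{
    /* One page above the illustrative lowmem base maps physical frame 1. */
    return demo_mmap_kmem(DEMO_PAGE_OFFSET + (1UL << DEMO_PAGE_SHIFT));
}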
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h       Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/mmu_context.h       Thu Sep  1 08:25:22 2005
@@ -35,9 +35,9 @@
         * happen before reload of cr3/ldt (i.e., not in __switch_to).
         */
        asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
-               : "=m" (*(int *)&current->thread.fs),
-                 "=m" (*(int *)&current->thread.gs));
-       asm volatile ( "mov %0,%%fs ; mov %0,%%gs"
+               : "=m" (current->thread.fs),
+                 "=m" (current->thread.gs));
+       asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
                : : "r" (0) );
 }
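
The mmu_context.h hunk drops the (int *) casts so the "=m" constraints refer to the thread fields directly, and spells the clearing moves as movl with an explicit zero register. A tiny x86-only user-space sketch of the same save pattern (reading the selectors only, since clearing %fs or %gs outside the kernel would break thread-local storage):

#include <stdio.h>

int main(void)
{
    unsigned short fs, gs;

    /* "=m" lets the compiler address the variables directly; no casts are needed. */
    asm volatile("mov %%fs,%0 ; mov %%gs,%1" : "=m" (fs), "=m" (gs));

    printf("fs selector: %#x, gs selector: %#x\n", fs, gs);
    return 0;
}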
 
diff -r 6d4c0bfc3c1c -r 6fa6c392d258 linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h Wed Aug 31 10:24:43 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/processor.h Thu Sep  1 08:25:22 2005
@@ -517,8 +517,8 @@
  * This special macro can be used to load a debugging register
  */
 #define loaddebug(thread,register) \
-       HYPERVISOR_set_debugreg((register),     \
-                       ((thread)->debugreg[register]))
+               HYPERVISOR_set_debugreg((register), \
+                                       ((thread)->debugreg[register]))
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
