
[Xen-changelog] (no subject)



# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID 51484df99be1195e07d06021dabb414167993c40
# Parent  55f73916d319801065ffcdf6041b54e2770b9c03
# Parent  1a84eec7433193dc7277b7a84930e18d88475486
Merged.
---
 linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile   |    2 
 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c |  767 -----------
 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h |   40 
 linux-2.6-xen-sparse/include/xen/tpmfe.h             |   40 
 .hgignore                                            |    2 
 docs/misc/vtpm.txt                                   |   10 
 extras/mini-os/Makefile                              |    8 
 extras/mini-os/include/lib.h                         |    1 
 extras/mini-os/include/os.h                          |    3 
 extras/mini-os/include/types.h                       |    2 
 extras/mini-os/sched.c                               |    2 
 extras/mini-os/traps.c                               |   10 
 extras/mini-os/x86_32.S                              |   53 
 extras/mini-os/x86_64.S                              |  273 +---
 linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre   |   22 
 linux-2.6-xen-sparse/drivers/char/tpm/Kconfig        |    3 
 linux-2.6-xen-sparse/drivers/char/tpm/Makefile       |    2 
 linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.c     |  546 ++++++++
 linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.h     |   38 
 linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c      | 1247 +++++++++++--------
 linux-2.6-xen-sparse/drivers/xen/Kconfig             |    8 
 linux-2.6-xen-sparse/drivers/xen/Makefile            |    1 
 linux-2.6-xen-sparse/drivers/xen/tpmback/common.h    |    2 
 linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c |    3 
 linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c   |   83 -
 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c  |    2 
 linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h   |    1 
 linux-2.6-xen-sparse/mm/Kconfig                      |    6 
 tools/python/xen/xend/XendBootloader.py              |    8 
 tools/python/xen/xend/XendDomainInfo.py              |    5 
 tools/python/xen/xm/create.py                        |   17 
 xen/arch/x86/hvm/svm/svm.c                           |   72 -
 xen/arch/x86/hvm/svm/vmcb.c                          |   55 
 xen/arch/x86/hvm/vmx/vmx.c                           |   41 
 xen/arch/x86/x86_32/mm.c                             |   15 
 xen/arch/x86/x86_64/mm.c                             |   18 
 36 files changed, 1635 insertions(+), 1773 deletions(-)

diff -r 55f73916d319 -r 51484df99be1 .hgignore
--- a/.hgignore Tue May 02 18:17:59 2006 +0100
+++ b/.hgignore Thu May 04 14:19:19 2006 +0100
@@ -14,6 +14,7 @@
 .*\.orig$
 .*\.rej$
 .*/a\.out$
+.*cscope\.*$
 ^[^/]*\.bz2$
 ^TAGS$
 ^dist/.*$
@@ -184,7 +185,6 @@
 ^tools/xm-test/ramdisk/buildroot
 ^xen/BLOG$
 ^xen/TAGS$
-^xen/cscope\.*$
 ^xen/arch/x86/asm-offsets\.s$
 ^xen/arch/x86/boot/mkelf32$
 ^xen/arch/x86/xen\.lds$
diff -r 55f73916d319 -r 51484df99be1 docs/misc/vtpm.txt
--- a/docs/misc/vtpm.txt        Tue May 02 18:17:59 2006 +0100
+++ b/docs/misc/vtpm.txt        Thu May 04 14:19:19 2006 +0100
@@ -21,17 +21,23 @@ linux-2.6.??-xen/.config file:
 linux-2.6.??-xen/.config file:
 
 CONFIG_XEN_TPMDEV_BACKEND=y
-CONFIG_XEN_TPMDEV_GRANT=y
 
-CONFIG_TCG_TPM=m
+CONFIG_TCG_TPM=y
 CONFIG_TCG_NSC=m
 CONFIG_TCG_ATMEL=m
+CONFIG_TCG_XEN=y
 
 You must also enable the virtual TPM to be built:
 
 In Config.mk in the Xen root directory set the line
 
 VTPM_TOOLS ?= y
+
+and in
+
+tools/vtpm/Rules.mk set the line
+
+BUILD_EMULATOR = y
 
 Now build the Xen sources from Xen's root directory:
 
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/Makefile
--- a/extras/mini-os/Makefile   Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/Makefile   Thu May 04 14:19:19 2006 +0100
@@ -60,4 +60,12 @@ clean:
 %.o: %.S $(HDRS) Makefile
        $(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
 
+define all_sources
+     ( find . -follow -name SCCS -prune -o -name '*.[chS]' -print )
+endef
 
+.PHONY: cscope
+cscope:
+       $(all_sources) > cscope.files
+       cscope -k -b -q
+
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/include/lib.h
--- a/extras/mini-os/include/lib.h      Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/include/lib.h      Thu May 04 14:19:19 2006 +0100
@@ -56,6 +56,7 @@
 #define _LIB_H_
 
 #include <stdarg.h>
+#include <stddef.h>
 #include <console.h>
 
 /* printing */
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/include/os.h
--- a/extras/mini-os/include/os.h       Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/include/os.h       Thu May 04 14:19:19 2006 +0100
@@ -6,9 +6,6 @@
 
 #ifndef _OS_H_
 #define _OS_H_
-
-#define NULL 0
-
 
 #if __GNUC__ == 2 && __GNUC_MINOR__ < 96
 #define __builtin_expect(x, expected_value) (x)
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/include/types.h
--- a/extras/mini-os/include/types.h    Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/include/types.h    Thu May 04 14:19:19 2006 +0100
@@ -34,8 +34,6 @@ typedef unsigned long       u64;
 typedef unsigned long       u64;
 #endif
 
-typedef unsigned int        size_t;
-
 /* FreeBSD compat types */
 typedef unsigned char       u_char;
 typedef unsigned int        u_int;
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/sched.c
--- a/extras/mini-os/sched.c    Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/sched.c    Thu May 04 14:19:19 2006 +0100
@@ -324,7 +324,7 @@ void th_f2(void *data)
 
 void init_sched(void)
 {
-    printk("Initialising scheduler, idle_thread %p\n", idle_thread);
+    printk("Initialising scheduler\n");
 
     idle_thread = create_thread("Idle", idle_thread_fn, NULL);
     INIT_LIST_HEAD(&idle_thread->thread_list);
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/traps.c
--- a/extras/mini-os/traps.c    Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/traps.c    Thu May 04 14:19:19 2006 +0100
@@ -123,8 +123,13 @@ void do_page_fault(struct pt_regs *regs,
 void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
     unsigned long addr = read_cr2();
-    printk("Page fault at linear address %p, regs %p, code %lx\n", addr, regs,
-          error_code);
+#if defined(__x86_64__)
+    printk("Page fault at linear address %p, rip %p, code %lx\n",
+           addr, regs->rip, error_code);
+#else
+    printk("Page fault at linear address %p, eip %p, code %lx\n",
+           addr, regs->eip, error_code);
+#endif
     dump_regs(regs);
     page_walk(addr);
     do_exit();
@@ -195,7 +200,6 @@ static trap_info_t trap_table[] = {
     { 15, 0, __KERNEL_CS, (unsigned long)spurious_interrupt_bug      },
     { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error           },
     { 17, 0, __KERNEL_CS, (unsigned long)alignment_check             },
-    { 18, 0, __KERNEL_CS, (unsigned long)machine_check               },
     { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error      },
     {  0, 0,           0, 0                           }
 };
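
For orientation: mini-os installs this trap_table via a hypercall rather than writing an IDT itself, so removing the machine_check row above simply stops vector 18 from being routed to do_machine_check. A minimal sketch of the registration step, assuming the standard HYPERVISOR_set_trap_table wrapper that mini-os provides (the real trap_init() lives elsewhere in traps.c and is not part of this hunk):

    /* Sketch only: hand the trap_info_t table (terminated by the all-zero
     * entry above) to Xen.  Not part of this patch. */
    void trap_init(void)
    {
            HYPERVISOR_set_trap_table(trap_table);
    }
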
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/x86_32.S
--- a/extras/mini-os/x86_32.S   Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/x86_32.S   Thu May 04 14:19:19 2006 +0100
@@ -30,10 +30,10 @@ hypercall_page:
 hypercall_page:
         .org 0x3000
 
-ES             = 0x1c
-ORIG_EAX       = 0x20
-EIP            = 0x24
-CS             = 0x28
+ES             = 0x20
+ORIG_EAX       = 0x24
+EIP            = 0x28
+CS             = 0x2C
 
 #define ENTRY(X) .globl X ; X :
 
@@ -69,7 +69,7 @@ ENTRY(divide_error)
        pushl $0                # no error code
        pushl $do_divide_error
 do_exception:
-       pushl %ds
+    pushl %ds
        pushl %eax
        xorl %eax, %eax
        pushl %ebp
@@ -92,7 +92,7 @@ do_exception:
     pushl %edx
     pushl %eax
        call *%edi
-    addl $8,%esp
+    jmp ret_from_exception
     
 ret_from_exception:
         movb CS(%esp),%cl
@@ -223,69 +223,54 @@ ENTRY(invalid_op)
        pushl $do_invalid_op
        jmp do_exception
 
+
 ENTRY(coprocessor_segment_overrun)
        pushl $0
        pushl $do_coprocessor_segment_overrun
        jmp do_exception
+
 
 ENTRY(invalid_TSS)
        pushl $do_invalid_TSS
        jmp do_exception
 
+
 ENTRY(segment_not_present)
        pushl $do_segment_not_present
        jmp do_exception
 
+
 ENTRY(stack_segment)
        pushl $do_stack_segment
        jmp do_exception
 
+
 ENTRY(general_protection)
        pushl $do_general_protection
        jmp do_exception
 
+
 ENTRY(alignment_check)
        pushl $do_alignment_check
        jmp do_exception
 
-# This handler is special, because it gets an extra value on its stack,
-# which is the linear faulting address.
-# fastcall register usage:  %eax = pt_regs, %edx = error code,
-#                          %ecx = fault address
+
 ENTRY(page_fault)
-       pushl %ds
-       pushl %eax
-       xorl %eax, %eax
-       pushl %ebp
-       pushl %edi
-       pushl %esi
-       pushl %edx
-       decl %eax                       /* eax = -1 */
-       pushl %ecx
-       pushl %ebx
-       cld
-       movl ORIG_EAX(%esp), %edi
-       movl %eax, ORIG_EAX(%esp)
-       movl %es, %ecx
-       movl %ecx, ES(%esp)
-       movl $(__KERNEL_DS),%eax
-       movl %eax, %ds
-       movl %eax, %es
-       pushl %edi
-       movl %esp, %eax
-       pushl %eax
-       call do_page_fault
-       jmp ret_from_exception
-
+    pushl $do_page_fault
+    jmp do_exception
+    
 ENTRY(machine_check)
        pushl $0
        pushl $do_machine_check
        jmp do_exception
 
+
 ENTRY(spurious_interrupt_bug)
        pushl $0
        pushl $do_spurious_interrupt_bug
        jmp do_exception
+
+
 
 ENTRY(thread_starter)
     popl %eax
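
A note on the new ES/ORIG_EAX/EIP/CS constants near the top of this file: they are byte offsets into the exception stack frame, and the bump from 0x1c/0x20/0x24/0x28 to 0x20/0x24/0x28/0x2C reflects the extra segment-register slot saved by the common do_exception path, which page_fault now reuses instead of its own hand-rolled copy. A sketch of the Linux-derived pt_regs layout these offsets assume, each field 4 bytes wide; the authoritative definition lives in the mini-os headers, not in this patch:

    /* Sketch: 32-bit exception frame as seen by do_page_fault() and friends. */
    struct pt_regs {
            long ebx;       /* 0x00 */
            long ecx;       /* 0x04 */
            long edx;       /* 0x08 */
            long esi;       /* 0x0c */
            long edi;       /* 0x10 */
            long ebp;       /* 0x14 */
            long eax;       /* 0x18 */
            int  xds;       /* 0x1c */
            int  xes;       /* 0x20 = ES       */
            long orig_eax;  /* 0x24 = ORIG_EAX */
            long eip;       /* 0x28 = EIP      */
            int  xcs;       /* 0x2c = CS       */
            long eflags;
            long esp;
            int  xss;
    };
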
diff -r 55f73916d319 -r 51484df99be1 extras/mini-os/x86_64.S
--- a/extras/mini-os/x86_64.S   Tue May 02 18:17:59 2006 +0100
+++ b/extras/mini-os/x86_64.S   Thu May 04 14:19:19 2006 +0100
@@ -13,40 +13,6 @@
 #define ENTRY(X) .globl X ; X :
 .globl _start, shared_info, hypercall_page
 
-#define SAVE_ALL \
-        cld; \
-        pushq %rdi; \
-        pushq %rsi; \
-        pushq %rdx; \
-        pushq %rcx; \
-        pushq %rax; \
-        pushq %r8; \
-        pushq %r9; \
-        pushq %r10; \
-        pushq %r11; \
-        pushq %rbx; \
-        pushq %rbp; \
-        pushq %r12; \
-        pushq %r13; \
-        pushq %r14; \
-        pushq %r15;
-
-#define RESTORE_ALL \
-        popq  %r15; \
-        popq  %r14; \
-        popq  %r13; \
-        popq  %r12; \
-        popq  %rbp; \
-        popq  %rbx; \
-        popq  %r11; \
-        popq  %r10; \
-        popq  %r9; \
-        popq  %r8; \
-        popq  %rax; \
-        popq  %rcx; \
-        popq  %rdx; \
-        popq  %rsi; \
-        popq  %rdi
 
 _start:
         cld
@@ -240,7 +206,17 @@ error_call_handler:
 #      CFI_ENDPROC
 .endm  
 
-
+.macro errorentry sym
+#      XCPT_FRAME
+        movq (%rsp),%rcx
+        movq 8(%rsp),%r11
+        addq $0x10,%rsp /* rsp points to the error code */
+       pushq %rax
+#      CFI_ADJUST_CFA_OFFSET 8
+       leaq  \sym(%rip),%rax
+       jmp error_entry
+#      CFI_ENDPROC
+.endm
 
 #define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
 #define XEN_PUT_VCPU_INFO(reg)
@@ -319,159 +295,84 @@ ENTRY(failsafe_callback)
         popq  %r11
         iretq
 
-error_code:
-        SAVE_ALL
-        movq  %rsp,%rdi
-        movl  15*8+4(%rsp),%eax
-        leaq  exception_table(%rip),%rdx
-        callq *(%rdx,%rax,8)
-        RESTORE_ALL
-        addq  $8,%rsp
-        iretq
-                        
+
+ENTRY(coprocessor_error)
+        zeroentry do_coprocessor_error
+
+
+ENTRY(simd_coprocessor_error)
+        zeroentry do_simd_coprocessor_error
+
+
+ENTRY(device_not_available)
+        zeroentry do_device_not_available
+
+
+ENTRY(debug)
+#       INTR_FRAME
+#       CFI_ADJUST_CFA_OFFSET 8 */
+        zeroentry do_debug
+#       CFI_ENDPROC
+
+
+ENTRY(int3)
+#       INTR_FRAME
+#       CFI_ADJUST_CFA_OFFSET 8 */
+        zeroentry do_int3
+#       CFI_ENDPROC
+
+ENTRY(overflow)
+        zeroentry do_overflow
+
+
+ENTRY(bounds)
+        zeroentry do_bounds
+    
+    
+ENTRY(invalid_op)
+        zeroentry do_invalid_op
+
+
+ENTRY(coprocessor_segment_overrun)
+        zeroentry do_coprocessor_segment_overrun
+
+
+ENTRY(invalid_TSS)
+        errorentry do_invalid_TSS
+
+
+ENTRY(segment_not_present)
+        errorentry do_segment_not_present
+
+
+/* runs on exception stack */
+ENTRY(stack_segment)
+#       XCPT_FRAME
+        errorentry do_stack_segment
+#       CFI_ENDPROC
+                    
+
+ENTRY(general_protection)
+        errorentry do_general_protection
+
+
+ENTRY(alignment_check)
+        errorentry do_alignment_check
+
+
 ENTRY(divide_error)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_divide_error,4(%rsp)
-        jmp   error_code
-        
-ENTRY(coprocessor_error)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_copro_error,4(%rsp)
-        jmp   error_code
-
-ENTRY(simd_coprocessor_error)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_simd_error,4(%rsp)
-        jmp   error_code
-
-ENTRY(device_not_available)
-        popq  %rcx
-        popq  %r11
-        movl  $TRAP_no_device,4(%rsp)
-        jmp   error_code
-
-ENTRY(debug)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_debug,4(%rsp)
-        jmp   error_code
-
-ENTRY(int3)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_int3,4(%rsp)
-        jmp   error_code
-
-ENTRY(overflow)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_overflow,4(%rsp)
-        jmp   error_code
-
-ENTRY(bounds)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_bounds,4(%rsp)
-        jmp   error_code
-
-ENTRY(invalid_op)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_invalid_op,4(%rsp)
-        jmp   error_code
-
-ENTRY(coprocessor_segment_overrun)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_copro_seg,4(%rsp)
-        jmp   error_code
-
-ENTRY(invalid_TSS)
-        popq  %rcx
-        popq  %r11
-        movl  $TRAP_invalid_tss,4(%rsp)
-        jmp   error_code
-
-ENTRY(segment_not_present)
-        popq  %rcx
-        popq  %r11
-        movl  $TRAP_no_segment,4(%rsp)
-        jmp   error_code
-
-ENTRY(stack_segment)
-        popq  %rcx
-        popq  %r11
-        movl  $TRAP_stack_error,4(%rsp)
-        jmp   error_code
-
-ENTRY(general_protection)
-        popq  %rcx
-        popq  %r11
-        movl  $TRAP_gp_fault,4(%rsp)
-        jmp   error_code
-
-ENTRY(alignment_check)
-        popq  %rcx
-        popq  %r11
-        movl  $TRAP_alignment_check,4(%rsp)
-        jmp   error_code
-
-ENTRY(virt_cr2)
-        .quad 0
+        zeroentry do_divide_error
+
+
+ENTRY(spurious_interrupt_bug)
+        zeroentry do_spurious_interrupt_bug
+            
+
 ENTRY(page_fault)
-        popq  %rcx
-        popq  %r11
-        popq  virt_cr2(%rip)
-        movl  $TRAP_page_fault,4(%rsp)
-        jmp   error_code
-        
-ENTRY(machine_check)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_machine_check,4(%rsp)
-        jmp   error_code
-
-ENTRY(spurious_interrupt_bug)
-        popq  %rcx
-        popq  %r11
-       pushq $0
-        movl  $TRAP_spurious_int,4(%rsp)
-        jmp   error_code
-
-ENTRY(exception_table)
-        .quad do_divide_error
-        .quad do_debug
-        .quad 0 # nmi
-        .quad do_int3
-        .quad do_overflow
-        .quad do_bounds
-        .quad do_invalid_op
-        .quad 0
-        .quad 0
-        .quad do_coprocessor_segment_overrun
-        .quad do_invalid_TSS
-        .quad do_segment_not_present
-        .quad do_stack_segment
-        .quad do_general_protection
-        .quad do_page_fault
-        .quad do_spurious_interrupt_bug
-        .quad do_coprocessor_error
-        .quad do_alignment_check
-        .quad do_machine_check
-        .quad do_simd_coprocessor_error
+        errorentry do_page_fault
+
+
+
 
 
 ENTRY(thread_starter)
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre
--- a/linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre        Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/arch/ia64/xen-mkbuildtree-pre        Thu May 04 14:19:19 2006 +0100
@@ -14,28 +14,12 @@ function try_to_mv() {
        fi
 }
 
-function try_to_mkdir() {
-       if [ ! -e $2 ]
-       then
-               mv $1 $2
-               mkdir $1
-       fi
-}
-
-try_to_mkdir mm mm.xen-x86
-try_to_mv net net.xen-x86
-try_to_mv kernel kernel.xen-x86
-try_to_mv drivers/acpi/tables.c drivers/acpi/tables.c.xen-x86
-#try_to_mv arch/xen/kernel drivers/xen/core
-#try_to_mkdir arch/xen arch/xen.xen-x86
-#try_to_mv arch/xen.xen-x86/configs arch/xen
-#try_to_mv include/asm-generic include/asm-generic.xen-x86
-try_to_mkdir include/linux include/linux.xen-x86
+try_to_mv mm/Kconfig mm/Kconfig.xen-x86
 
 # need to grab a couple of xen-modified files for generic_page_range and
 # typedef pte_fn_t which are used by driver/xen blkif
-ln -sf ../mm.xen-x86/memory.c mm/
-ln -sf ../linux.xen-x86/mm.h include/linux/
+#ln -sf ../mm.xen-x86/memory.c mm/
+#ln -sf ../linux.xen-x86/mm.h include/linux/
 
 #eventually asm-xsi-offsets needs to be part of hypervisor.h/hypercall.h
 ln -sf ../../../../xen/include/asm-ia64/asm-xsi-offsets.h include/asm-ia64/xen/
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/char/tpm/Kconfig
--- a/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig     Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/char/tpm/Kconfig     Thu May 04 14:19:19 2006 +0100
@@ -51,7 +51,7 @@ config TCG_INFINEON
 
 config TCG_XEN
        tristate "XEN TPM Interface"
-       depends on TCG_TPM && XEN && XEN_TPMDEV_FRONTEND
+       depends on TCG_TPM && XEN
        ---help---
          If you want to make TPM support available to a Xen
          user domain, say Yes and it will
@@ -60,4 +60,3 @@ config TCG_XEN
           tpm_xen.
 
 endmenu
-
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/char/tpm/Makefile
--- a/linux-2.6-xen-sparse/drivers/char/tpm/Makefile    Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/char/tpm/Makefile    Thu May 04 14:19:19 2006 +0100
@@ -8,4 +8,4 @@ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
 obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
 obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
 obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
-obj-$(CONFIG_TCG_XEN) += tpm_xen.o
+obj-$(CONFIG_TCG_XEN) += tpm_xen.o tpm_vtpm.o
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c
--- a/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c   Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_xen.c   Thu May 04 14:19:19 2006 +0100
@@ -1,536 +1,767 @@
 /*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright (c) 2005, IBM Corporation
  *
- * Authors:
- * Leendert van Doorn <leendert@xxxxxxxxxxxxxx>
- * Dave Safford <safford@xxxxxxxxxxxxxx>
- * Reiner Sailer <sailer@xxxxxxxxxxxxxx>
- * Kylene Hall <kjhall@xxxxxxxxxx>
- * Stefan Berger <stefanb@xxxxxxxxxx>
+ * Author: Stefan Berger, stefanb@xxxxxxxxxx
+ * Grant table support: Mahadevan Gomathisankaran
  *
- * Maintained by: <tpmdd_devel@xxxxxxxxxxxxxxxxxxxxx>
+ * This code has been derived from drivers/xen/netfront/netfront.c
  *
- * Device driver for TCG/TCPA TPM (trusted platform module) for XEN.
- * Specifications at www.trustedcomputinggroup.org
+ * Copyright (c) 2002-2004, K A Fraser
  *
  * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
  *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
  */
 
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
 #include <asm/uaccess.h>
-#include <linux/list.h>
-#include <xen/tpmfe.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include "tpm.h"
-
-/* read status bits */
-enum {
-       STATUS_BUSY = 0x01,
-       STATUS_DATA_AVAIL = 0x02,
-       STATUS_READY = 0x04
+#include <xen/evtchn.h>
+#include <xen/interface/grant_table.h>
+#include <xen/interface/io/tpmif.h>
+#include <xen/xenbus.h>
+#include "tpm_vtpm.h"
+
+#undef DEBUG
+
+/* local structures */
+struct tpm_private {
+       tpmif_tx_interface_t *tx;
+       atomic_t refcnt;
+       unsigned int evtchn;
+       unsigned int irq;
+       u8 is_connected;
+       u8 is_suspended;
+
+       spinlock_t tx_lock;
+
+       struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
+
+       atomic_t tx_busy;
+       void *tx_remember;
+       domid_t backend_id;
+       wait_queue_head_t wait_q;
+
+       struct xenbus_device *dev;
+       int ring_ref;
 };
 
-#define MIN(x,y)  ((x) < (y)) ? (x) : (y)
-
-struct transmission {
-       struct list_head next;
-       unsigned char *request;
-       unsigned int request_len;
-       unsigned char *rcv_buffer;
-       unsigned int  buffersize;
-       unsigned int flags;
+struct tx_buffer {
+       unsigned int size;      // available space in data
+       unsigned int len;       // used space in data
+       unsigned char *data;    // pointer to a page
 };
 
-enum {
-       TRANSMISSION_FLAG_WAS_QUEUED = 0x1
+
+/* locally visible variables */
+static grant_ref_t gref_head;
+static struct tpm_private *my_priv;
+
+/* local function prototypes */
+static irqreturn_t tpmif_int(int irq,
+                             void *tpm_priv,
+                             struct pt_regs *ptregs);
+static void tpmif_rx_action(unsigned long unused);
+static int tpmif_connect(struct xenbus_device *dev,
+                         struct tpm_private *tp,
+                         domid_t domid);
+static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
+static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
+static void tpmif_free_tx_buffers(struct tpm_private *tp);
+static void tpmif_set_connected_state(struct tpm_private *tp,
+                                      u8 newstate);
+static int tpm_xmit(struct tpm_private *tp,
+                    const u8 * buf, size_t count, int userbuffer,
+                    void *remember);
+static void destroy_tpmring(struct tpm_private *tp);
+
+#define DPRINTK(fmt, args...) \
+    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
+#define IPRINTK(fmt, args...) \
+    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
+#define WPRINTK(fmt, args...) \
+    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
+
+#define GRANT_INVALID_REF      0
+
+
+static inline int
+tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
+               int isuserbuffer)
+{
+       int copied = len;
+
+       if (len > txb->size) {
+               copied = txb->size;
+       }
+       if (isuserbuffer) {
+               if (copy_from_user(txb->data, src, copied))
+                       return -EFAULT;
+       } else {
+               memcpy(txb->data, src, copied);
+       }
+       txb->len = len;
+       return copied;
+}
+
+static inline struct tx_buffer *tx_buffer_alloc(void)
+{
+       struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
+                                       GFP_KERNEL);
+
+       if (txb) {
+               txb->len = 0;
+               txb->size = PAGE_SIZE;
+               txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
+               if (txb->data == NULL) {
+                       kfree(txb);
+                       txb = NULL;
+               }
+       }
+       return txb;
+}
+
+
+static inline void tx_buffer_free(struct tx_buffer *txb)
+{
+       if (txb) {
+               free_page((long)txb->data);
+               kfree(txb);
+       }
+}
+
+/**************************************************************
+ Utility function for the tpm_private structure
+**************************************************************/
+static inline void tpm_private_init(struct tpm_private *tp)
+{
+       spin_lock_init(&tp->tx_lock);
+       init_waitqueue_head(&tp->wait_q);
+       atomic_set(&tp->refcnt, 1);
+}
+
+static inline void tpm_private_put(void)
+{
+       if ( atomic_dec_and_test(&my_priv->refcnt)) {
+               tpmif_free_tx_buffers(my_priv);
+               kfree(my_priv);
+               my_priv = NULL;
+       }
+}
+
+static struct tpm_private *tpm_private_get(void)
+{
+       int err;
+       if (!my_priv) {
+               my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
+               if (my_priv) {
+                       tpm_private_init(my_priv);
+                       err = tpmif_allocate_tx_buffers(my_priv);
+                       if (err < 0) {
+                               tpm_private_put();
+                       }
+               }
+       } else {
+               atomic_inc(&my_priv->refcnt);
+       }
+       return my_priv;
+}
+
+/**************************************************************
+
+ The interface to let the tpm plugin register its callback
+ function and send data to another partition using this module
+
+**************************************************************/
+
+static DEFINE_MUTEX(suspend_lock);
+/*
+ * Send data via this module by calling this function
+ */
+int vtpm_vd_send(struct tpm_chip *chip,
+                 struct tpm_private *tp,
+                 const u8 * buf, size_t count, void *ptr)
+{
+       int sent;
+
+       mutex_lock(&suspend_lock);
+       sent = tpm_xmit(tp, buf, count, 0, ptr);
+       mutex_unlock(&suspend_lock);
+
+       return sent;
+}
+
+/**************************************************************
+ XENBUS support code
+**************************************************************/
+
+static int setup_tpmring(struct xenbus_device *dev,
+                         struct tpm_private *tp)
+{
+       tpmif_tx_interface_t *sring;
+       int err;
+
+       tp->ring_ref = GRANT_INVALID_REF;
+
+       sring = (void *)__get_free_page(GFP_KERNEL);
+       if (!sring) {
+               xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+               return -ENOMEM;
+       }
+       tp->tx = sring;
+
+       err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
+       if (err < 0) {
+               free_page((unsigned long)sring);
+               tp->tx = NULL;
+               xenbus_dev_fatal(dev, err, "allocating grant reference");
+               goto fail;
+       }
+       tp->ring_ref = err;
+
+       err = tpmif_connect(dev, tp, dev->otherend_id);
+       if (err)
+               goto fail;
+
+       return 0;
+fail:
+       destroy_tpmring(tp);
+       return err;
+}
+
+
+static void destroy_tpmring(struct tpm_private *tp)
+{
+       tpmif_set_connected_state(tp, 0);
+
+       if (tp->ring_ref != GRANT_INVALID_REF) {
+               gnttab_end_foreign_access(tp->ring_ref, 0,
+                                         (unsigned long)tp->tx);
+               tp->ring_ref = GRANT_INVALID_REF;
+               tp->tx = NULL;
+       }
+
+       if (tp->irq)
+               unbind_from_irqhandler(tp->irq, tp);
+
+       tp->evtchn = tp->irq = 0;
+}
+
+
+static int talk_to_backend(struct xenbus_device *dev,
+                           struct tpm_private *tp)
+{
+       const char *message = NULL;
+       int err;
+       xenbus_transaction_t xbt;
+
+       err = setup_tpmring(dev, tp);
+       if (err) {
+               xenbus_dev_fatal(dev, err, "setting up ring");
+               goto out;
+       }
+
+again:
+       err = xenbus_transaction_start(&xbt);
+       if (err) {
+               xenbus_dev_fatal(dev, err, "starting transaction");
+               goto destroy_tpmring;
+       }
+
+       err = xenbus_printf(xbt, dev->nodename,
+                           "ring-ref","%u", tp->ring_ref);
+       if (err) {
+               message = "writing ring-ref";
+               goto abort_transaction;
+       }
+
+       err = xenbus_printf(xbt, dev->nodename,
+                           "event-channel", "%u", tp->evtchn);
+       if (err) {
+               message = "writing event-channel";
+               goto abort_transaction;
+       }
+
+       err = xenbus_transaction_end(xbt, 0);
+       if (err == -EAGAIN)
+               goto again;
+       if (err) {
+               xenbus_dev_fatal(dev, err, "completing transaction");
+               goto destroy_tpmring;
+       }
+
+       xenbus_switch_state(dev, XenbusStateConnected);
+
+       return 0;
+
+abort_transaction:
+       xenbus_transaction_end(xbt, 1);
+       if (message)
+               xenbus_dev_error(dev, err, "%s", message);
+destroy_tpmring:
+       destroy_tpmring(tp);
+out:
+       return err;
+}
+
+/**
+ * Callback received when the backend's state changes.
+ */
+static void backend_changed(struct xenbus_device *dev,
+                           XenbusState backend_state)
+{
+       struct tpm_private *tp = dev->data;
+       DPRINTK("\n");
+
+       switch (backend_state) {
+       case XenbusStateInitialising:
+       case XenbusStateInitWait:
+       case XenbusStateInitialised:
+       case XenbusStateUnknown:
+               break;
+
+       case XenbusStateConnected:
+               tpmif_set_connected_state(tp, 1);
+               break;
+
+       case XenbusStateClosing:
+               tpmif_set_connected_state(tp, 0);
+               break;
+
+       case XenbusStateClosed:
+               if (tp->is_suspended == 0) {
+                       device_unregister(&dev->dev);
+               }
+               xenbus_switch_state(dev, XenbusStateClosed);
+               break;
+       }
+}
+
+
+static int tpmfront_probe(struct xenbus_device *dev,
+                          const struct xenbus_device_id *id)
+{
+       int err;
+       int handle;
+       struct tpm_private *tp = tpm_private_get();
+
+       if (!tp)
+               return -ENOMEM;
+
+       err = xenbus_scanf(XBT_NULL, dev->nodename,
+                          "handle", "%i", &handle);
+       if (XENBUS_EXIST_ERR(err))
+               return err;
+
+       if (err < 0) {
+               xenbus_dev_fatal(dev,err,"reading virtual-device");
+               return err;
+       }
+
+       tp->dev = dev;
+       dev->data = tp;
+
+       err = talk_to_backend(dev, tp);
+       if (err) {
+               tpm_private_put();
+               dev->data = NULL;
+               return err;
+       }
+       return 0;
+}
+
+
+static int tpmfront_remove(struct xenbus_device *dev)
+{
+       struct tpm_private *tp = (struct tpm_private *)dev->data;
+       destroy_tpmring(tp);
+       return 0;
+}
+
+static int tpmfront_suspend(struct xenbus_device *dev)
+{
+       struct tpm_private *tp = (struct tpm_private *)dev->data;
+       u32 ctr;
+
+       /* lock, so no app can send */
+       mutex_lock(&suspend_lock);
+       tp->is_suspended = 1;
+
+       for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
+               if ((ctr % 10) == 0)
+                       printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
+               /*
+                * Wait for a request to be responded to.
+                */
+               interruptible_sleep_on_timeout(&tp->wait_q, 100);
+       }
+       xenbus_switch_state(dev, XenbusStateClosed);
+
+       if (atomic_read(&tp->tx_busy)) {
+               /*
+                * A temporary work-around.
+                */
+               printk("TPM-FE [WARNING]: Resetting busy flag.");
+               atomic_set(&tp->tx_busy, 0);
+       }
+
+       return 0;
+}
+
+static int tpmfront_resume(struct xenbus_device *dev)
+{
+       struct tpm_private *tp = (struct tpm_private *)dev->data;
+       destroy_tpmring(tp);
+       return talk_to_backend(dev, tp);
+}
+
+static int tpmif_connect(struct xenbus_device *dev,
+                         struct tpm_private *tp,
+                         domid_t domid)
+{
+       int err;
+
+       tp->backend_id = domid;
+
+       err = xenbus_alloc_evtchn(dev, &tp->evtchn);
+       if (err)
+               return err;
+
+       err = bind_evtchn_to_irqhandler(tp->evtchn,
+                                       tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
+                                       tp);
+       if (err <= 0) {
+               WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
+               return err;
+       }
+
+       tp->irq = err;
+       return 0;
+}
+
+static struct xenbus_device_id tpmfront_ids[] = {
+       { "vtpm" },
+       { "" }
 };
 
-struct data_exchange {
-       struct transmission *current_request;
-       spinlock_t           req_list_lock;
-       wait_queue_head_t    req_wait_queue;
-
-       struct list_head     queued_requests;
-
-       struct transmission *current_response;
-       spinlock_t           resp_list_lock;
-       wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
-
-       struct transmission *req_cancelled;       // if a cancellation was encounterd
-
-       unsigned int         fe_status;
-       unsigned int         flags;
+static struct xenbus_driver tpmfront = {
+       .name = "vtpm",
+       .owner = THIS_MODULE,
+       .ids = tpmfront_ids,
+       .probe = tpmfront_probe,
+       .remove =  tpmfront_remove,
+       .resume = tpmfront_resume,
+       .otherend_changed = backend_changed,
+       .suspend = tpmfront_suspend,
 };
 
-enum {
-       DATAEX_FLAG_QUEUED_ONLY = 0x1
+static void __init init_tpm_xenbus(void)
+{
+       xenbus_register_frontend(&tpmfront);
+}
+
+static void __exit exit_tpm_xenbus(void)
+{
+       xenbus_unregister_driver(&tpmfront);
+}
+
+static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
+{
+       unsigned int i;
+
+       for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
+               tp->tx_buffers[i] = tx_buffer_alloc();
+               if (!tp->tx_buffers[i]) {
+                       tpmif_free_tx_buffers(tp);
+                       return -ENOMEM;
+               }
+       }
+       return 0;
+}
+
+static void tpmif_free_tx_buffers(struct tpm_private *tp)
+{
+       unsigned int i;
+
+       for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
+               tx_buffer_free(tp->tx_buffers[i]);
+       }
+}
+
+static void tpmif_rx_action(unsigned long priv)
+{
+       struct tpm_private *tp = (struct tpm_private *)priv;
+
+       int i = 0;
+       unsigned int received;
+       unsigned int offset = 0;
+       u8 *buffer;
+       tpmif_tx_request_t *tx;
+       tx = &tp->tx->ring[i].req;
+
+       atomic_set(&tp->tx_busy, 0);
+       wake_up_interruptible(&tp->wait_q);
+
+       received = tx->size;
+
+       buffer = kmalloc(received, GFP_ATOMIC);
+       if (NULL == buffer) {
+               goto exit;
+       }
+
+       for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
+               struct tx_buffer *txb = tp->tx_buffers[i];
+               tpmif_tx_request_t *tx;
+               unsigned int tocopy;
+
+               tx = &tp->tx->ring[i].req;
+               tocopy = tx->size;
+               if (tocopy > PAGE_SIZE) {
+                       tocopy = PAGE_SIZE;
+               }
+
+               memcpy(&buffer[offset], txb->data, tocopy);
+
+               gnttab_release_grant_reference(&gref_head, tx->ref);
+
+               offset += tocopy;
+       }
+
+       vtpm_vd_recv(buffer, received, tp->tx_remember);
+       kfree(buffer);
+
+exit:
+
+       return;
+}
+
+
+static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
+{
+       struct tpm_private *tp = tpm_priv;
+       unsigned long flags;
+
+       spin_lock_irqsave(&tp->tx_lock, flags);
+       tpmif_rx_tasklet.data = (unsigned long)tp;
+       tasklet_schedule(&tpmif_rx_tasklet);
+       spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+
+static int tpm_xmit(struct tpm_private *tp,
+                    const u8 * buf, size_t count, int isuserbuffer,
+                    void *remember)
+{
+       tpmif_tx_request_t *tx;
+       TPMIF_RING_IDX i;
+       unsigned int offset = 0;
+
+       spin_lock_irq(&tp->tx_lock);
+
+       if (unlikely(atomic_read(&tp->tx_busy))) {
+               printk("tpm_xmit: There's an outstanding request/response "
+                      "on the way!\n");
+               spin_unlock_irq(&tp->tx_lock);
+               return -EBUSY;
+       }
+
+       if (tp->is_connected != 1) {
+               spin_unlock_irq(&tp->tx_lock);
+               return -EIO;
+       }
+
+       for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
+               struct tx_buffer *txb = tp->tx_buffers[i];
+               int copied;
+
+               if (NULL == txb) {
+                       DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
+                               "Not transmitting anything!\n", i);
+                       spin_unlock_irq(&tp->tx_lock);
+                       return -EFAULT;
+               }
+               copied = tx_buffer_copy(txb, &buf[offset], count,
+                                       isuserbuffer);
+               if (copied < 0) {
+                       /* An error occurred */
+                       spin_unlock_irq(&tp->tx_lock);
+                       return copied;
+               }
+               count -= copied;
+               offset += copied;
+
+               tx = &tp->tx->ring[i].req;
+
+               tx->addr = virt_to_machine(txb->data);
+               tx->size = txb->len;
+
+               DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
+                       txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
+
+               /* get the granttable reference for this page */
+               tx->ref = gnttab_claim_grant_reference(&gref_head);
+
+               if (-ENOSPC == tx->ref) {
+                       spin_unlock_irq(&tp->tx_lock);
+                       DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
+                       return -ENOSPC;
+               }
+               gnttab_grant_foreign_access_ref( tx->ref,
+                                                tp->backend_id,
+                                                (tx->addr >> PAGE_SHIFT),
+                                                0 /*RW*/);
+               wmb();
+       }
+
+       atomic_set(&tp->tx_busy, 1);
+       tp->tx_remember = remember;
+       mb();
+
+       DPRINTK("Notifying backend via event channel %d\n",
+               tp->evtchn);
+
+       notify_remote_via_irq(tp->irq);
+
+       spin_unlock_irq(&tp->tx_lock);
+       return offset;
+}
+
+
+static void tpmif_notify_upperlayer(struct tpm_private *tp)
+{
+       /*
+        * Notify upper layer about the state of the connection
+        * to the BE.
+        */
+       if (tp->is_connected) {
+               vtpm_vd_status(TPM_VD_STATUS_CONNECTED);
+       } else {
+               vtpm_vd_status(TPM_VD_STATUS_DISCONNECTED);
+       }
+}
+
+
+static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
+{
+       /*
+        * Don't notify upper layer if we are in suspend mode and
+        * should disconnect - assumption is that we will resume
+        * The mutex keeps apps from sending.
+        */
+       if (is_connected == 0 && tp->is_suspended == 1) {
+               return;
+       }
+
+       /*
+        * Unlock the mutex if we are connected again
+        * after being suspended - now resuming.
+        * This also removes the suspend state.
+        */
+       if (is_connected == 1 && tp->is_suspended == 1) {
+               tp->is_suspended = 0;
+               /* unlock, so apps can resume sending */
+               mutex_unlock(&suspend_lock);
+       }
+
+       if (is_connected != tp->is_connected) {
+               tp->is_connected = is_connected;
+               tpmif_notify_upperlayer(tp);
+       }
+}
+
+
+
+/* =================================================================
+ * Initialization function.
+ * =================================================================
+ */
+
+struct tpm_virtual_device tvd = {
+       .max_tx_size = PAGE_SIZE * TPMIF_TX_RING_SIZE,
 };
 
-static struct data_exchange dataex;
-
-static unsigned long disconnect_time;
-
-static struct tpmfe_device tpmfe;
-
-/* local function prototypes */
-static void __exit cleanup_xen(void);
-
-
-/* =============================================================
- * Some utility functions
- * =============================================================
- */
-static inline struct transmission *
-transmission_alloc(void)
-{
-       return kzalloc(sizeof(struct transmission), GFP_KERNEL);
-}
-
-static inline unsigned char *
-transmission_set_buffer(struct transmission *t,
-                        unsigned char *buffer, unsigned int len)
-{
-       kfree(t->request);
-       t->request = kmalloc(len, GFP_KERNEL);
-       if (t->request) {
-               memcpy(t->request,
-                      buffer,
-                      len);
-               t->request_len = len;
-       }
-       return t->request;
-}
-
-static inline void
-transmission_free(struct transmission *t)
-{
-       kfree(t->request);
-       kfree(t->rcv_buffer);
-       kfree(t);
-}
-
-/* =============================================================
- * Interface with the TPM shared memory driver for XEN
- * =============================================================
- */
-static int tpm_recv(const u8 *buffer, size_t count, const void *ptr)
-{
-       int ret_size = 0;
-       struct transmission *t;
-
-       /*
-        * The list with requests must contain one request
-        * only and the element there must be the one that
-        * was passed to me from the front-end.
-        */
-       if (dataex.current_request != ptr) {
-               printk("WARNING: The request pointer is different than the "
-                      "pointer the shared memory driver returned to me. "
-                      "%p != %p\n",
-                      dataex.current_request, ptr);
-       }
-
-       /*
-        * If the request has been cancelled, just quit here
-        */
-       if (dataex.req_cancelled == (struct transmission *)ptr) {
-               if (dataex.current_request == dataex.req_cancelled) {
-                       dataex.current_request = NULL;
-               }
-               transmission_free(dataex.req_cancelled);
-               dataex.req_cancelled = NULL;
-               return 0;
-       }
-
-       if (NULL != (t = dataex.current_request)) {
-               transmission_free(t);
-               dataex.current_request = NULL;
-       }
-
-       t = transmission_alloc();
-       if (t) {
-               unsigned long flags;
-               t->rcv_buffer = kmalloc(count, GFP_KERNEL);
-               if (! t->rcv_buffer) {
-                       transmission_free(t);
-                       return -ENOMEM;
-               }
-               t->buffersize = count;
-               memcpy(t->rcv_buffer, buffer, count);
-               ret_size = count;
-
-               spin_lock_irqsave(&dataex.resp_list_lock ,flags);
-               dataex.current_response = t;
-               spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-               wake_up_interruptible(&dataex.resp_wait_queue);
-       }
-       return ret_size;
-}
-
-
-static void tpm_fe_status(unsigned int flags)
-{
-       dataex.fe_status = flags;
-       if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
-               disconnect_time = jiffies;
-       }
-}
-
-/* =============================================================
- * Interface with the generic TPM driver
- * =============================================================
- */
-static int tpm_xen_recv(struct tpm_chip *chip, u8 * buf, size_t count)
-{
-       unsigned long flags;
-       int rc = 0;
-
-       spin_lock_irqsave(&dataex.resp_list_lock, flags);
-       /*
-        * Check if the previous operation only queued the command
-        * In this case there won't be a response, so I just
-        * return from here and reset that flag. In any other
-        * case I should receive a response from the back-end.
-        */
-       if ((dataex.flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
-               dataex.flags &= ~DATAEX_FLAG_QUEUED_ONLY;
-               spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-               /*
-                * a little hack here. The first few measurements
-                * are queued since there's no way to talk to the
-                * TPM yet (due to slowness of the control channel)
-                * So we just make IMA happy by giving it 30 NULL
-                * bytes back where the most important part is
-                * that the result code is '0'.
-                */
-
-               count = MIN(count, 30);
-               memset(buf, 0x0, count);
-               return count;
-       }
-       /*
-        * Check whether something is in the responselist and if
-        * there's nothing in the list wait for something to appear.
-        */
-
-       if (NULL == dataex.current_response) {
-               spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-               interruptible_sleep_on_timeout(&dataex.resp_wait_queue,
-                                              1000);
-               spin_lock_irqsave(&dataex.resp_list_lock ,flags);
-       }
-
-       if (NULL != dataex.current_response) {
-               struct transmission *t = dataex.current_response;
-               dataex.current_response = NULL;
-               rc = MIN(count, t->buffersize);
-               memcpy(buf, t->rcv_buffer, rc);
-               transmission_free(t);
-       }
-
-       spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-       return rc;
-}
-
-static int tpm_xen_send(struct tpm_chip *chip, u8 * buf, size_t count)
-{
-       /*
-        * We simply pass the packet onto the XEN shared
-        * memory driver.
-        */
-       unsigned long flags;
+static int __init tpmif_init(void)
+{
        int rc;
-       struct transmission *t = transmission_alloc();
-
-       spin_lock_irqsave(&dataex.req_list_lock, flags);
-       /*
-        * If there's a current request, it must be the
-        * previous request that has timed out.
-        */
-       if (dataex.current_request != NULL) {
-               printk("WARNING: Sending although there is a request outstanding.\n"
-                      "         Previous request must have timed out.\n");
-               transmission_free(dataex.current_request);
-               dataex.current_request = NULL;
-       }
-
-       if (t != NULL) {
-               unsigned int error = 0;
-               /*
-                * Queue the packet if the driver below is not
-                * ready, yet, or there is any packet already
-                * in the queue.
-                * If the driver below is ready, unqueue all
-                * packets first before sending our current
-                * packet.
-                * For each unqueued packet, except for the
-                * last (=current) packet, call the function
-                * tpm_xen_recv to wait for the response to come
-                * back.
-                */
-               if ((dataex.fe_status & TPMFE_STATUS_CONNECTED) == 0) {
-                       if (time_after(jiffies, disconnect_time + HZ * 10)) {
-                               rc = -ENOENT;
-                       } else {
-                               /*
-                                * copy the request into the buffer
-                                */
-                               if (transmission_set_buffer(t, buf, count)
-                                   == NULL) {
-                                       transmission_free(t);
-                                       rc = -ENOMEM;
-                                       goto exit;
-                               }
-                               dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
-                               list_add_tail(&t->next, &dataex.queued_requests);
-                               rc = 0;
-                       }
-               } else {
-                       /*
-                        * Check whether there are any packets in the queue
-                        */
-                       while (!list_empty(&dataex.queued_requests)) {
-                               /*
-                                * Need to dequeue them.
-                                * Read the result into a dummy buffer.
-                                */
-                               unsigned char buffer[1];
-                               struct transmission *qt = (struct transmission *) dataex.queued_requests.next;
-                               list_del(&qt->next);
-                               dataex.current_request = qt;
-                               spin_unlock_irqrestore(&dataex.req_list_lock,
-                                                      flags);
-
-                               rc = tpm_fe_send(tpmfe.tpm_private,
-                                                qt->request,
-                                                qt->request_len,
-                                                qt);
-
-                               if (rc < 0) {
-                                       spin_lock_irqsave(&dataex.req_list_lock, flags);
-                                       if ((qt = dataex.current_request) != NULL) {
-                                               /*
-                                                * requeue it at the beginning
-                                                * of the list
-                                                */
-                                               list_add(&qt->next,
-                                                &dataex.queued_requests);
-                                       }
-                                       dataex.current_request = NULL;
-                                       error = 1;
-                                       break;
-                               }
-                               /*
-                                * After this point qt is not valid anymore!
-                                * It is freed when the front-end is delivering the data
-                                * by calling tpm_recv
-                                */
-
-                               /*
-                                * Try to receive the response now into the provided dummy
-                                * buffer (I don't really care about this response since
-                                * there is no receiver anymore for this response)
-                                */
-                               rc = tpm_xen_recv(chip, buffer, sizeof(buffer));
-
-                               spin_lock_irqsave(&dataex.req_list_lock, flags);
-                       }
-
-                       if (error == 0) {
-                               /*
-                                * Finally, send the current request.
-                                */
-                               dataex.current_request = t;
-                               /*
-                                * Call the shared memory driver
-                                * Pass to it the buffer with the request, the
-                                * amount of bytes in the request and
-                                * a void * pointer (here: transmission structure)
-                                */
-                               rc = tpm_fe_send(tpmfe.tpm_private,
-                                                buf, count, t);
-                               /*
-                                * The generic TPM driver will call
-                                * the function to receive the response.
-                                */
-                               if (rc < 0) {
-                                       dataex.current_request = NULL;
-                                       goto queue_it;
-                               }
-                       } else {
-queue_it:
-                               if (transmission_set_buffer(t, buf, count) == NULL) {
-                                       transmission_free(t);
-                                       rc = -ENOMEM;
-                                       goto exit;
-                               }
-                               /*
-                                * An error occurred. Don't event try
-                                * to send the current request. Just
-                                * queue it.
-                                */
-                               dataex.flags |= DATAEX_FLAG_QUEUED_ONLY;
-                               list_add_tail(&t->next,
-                                             &dataex.queued_requests);
-                               rc = 0;
-                       }
-               }
-       } else {
-               rc = -ENOMEM;
-       }
-
-exit:
-       spin_unlock_irqrestore(&dataex.req_list_lock, flags);
-       return rc;
-}
-
-static void tpm_xen_cancel(struct tpm_chip *chip)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&dataex.resp_list_lock,flags);
-
-       dataex.req_cancelled = dataex.current_request;
-
-       spin_unlock_irqrestore(&dataex.resp_list_lock,flags);
-}
-
-static u8 tpm_xen_status(struct tpm_chip *chip)
-{
-       unsigned long flags;
-       u8 rc = 0;
-       spin_lock_irqsave(&dataex.resp_list_lock, flags);
-       /*
-        * Data are available if:
-        *  - there's a current response
-        *  - the last packet was queued only (this is fake, but necessary to
-        *      get the generic TPM layer to call the receive function.)
-        */
-       if (NULL != dataex.current_response ||
-           0 != (dataex.flags & DATAEX_FLAG_QUEUED_ONLY)) {
-               rc = STATUS_DATA_AVAIL;
-       }
-       spin_unlock_irqrestore(&dataex.resp_list_lock, flags);
-       return rc;
-}
-
-static struct file_operations tpm_xen_ops = {
-       .owner = THIS_MODULE,
-       .llseek = no_llseek,
-       .open = tpm_open,
-       .read = tpm_read,
-       .write = tpm_write,
-       .release = tpm_release,
-};
-
-static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
-static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
-static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
-static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
-
-static struct attribute* xen_attrs[] = {
-       &dev_attr_pubek.attr,
-       &dev_attr_pcrs.attr,
-       &dev_attr_caps.attr,
-       &dev_attr_cancel.attr,
-       NULL,
-};
-
-static struct attribute_group xen_attr_grp = { .attrs = xen_attrs };
-
-static struct tpm_vendor_specific tpm_xen = {
-       .recv = tpm_xen_recv,
-       .send = tpm_xen_send,
-       .cancel = tpm_xen_cancel,
-       .status = tpm_xen_status,
-       .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
-       .req_complete_val  = STATUS_DATA_AVAIL,
-       .req_canceled = STATUS_READY,
-       .base = 0,
-       .attr_group = &xen_attr_grp,
-       .miscdev.fops = &tpm_xen_ops,
-       .buffersize = 64 * 1024,
-};
-
-static struct platform_device *pdev;
-
-static struct tpmfe_device tpmfe = {
-       .receive = tpm_recv,
-       .status  = tpm_fe_status,
-};
-
-
-static int __init init_xen(void)
-{
-       int rc;
+       struct tpm_private *tp;
 
        if ((xen_start_info->flags & SIF_INITDOMAIN)) {
                return -EPERM;
        }
-       /*
-        * Register device with the low lever front-end
-        * driver
-        */
-       if ((rc = tpm_fe_register_receiver(&tpmfe)) < 0) {
-               goto err_exit;
-       }
-
-       /*
-        * Register our device with the system.
-        */
-       pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
-       if (IS_ERR(pdev)) {
-               rc = PTR_ERR(pdev);
-               goto err_unreg_fe;
-       }
-
-       tpm_xen.buffersize = tpmfe.max_tx_size;
-
-       if ((rc = tpm_register_hardware(&pdev->dev, &tpm_xen)) < 0) {
-               goto err_unreg_pdev;
-       }
-
-       dataex.current_request = NULL;
-       spin_lock_init(&dataex.req_list_lock);
-       init_waitqueue_head(&dataex.req_wait_queue);
-       INIT_LIST_HEAD(&dataex.queued_requests);
-
-       dataex.current_response = NULL;
-       spin_lock_init(&dataex.resp_list_lock);
-       init_waitqueue_head(&dataex.resp_wait_queue);
-
-       disconnect_time = jiffies;
-
-       return 0;
-
-
-err_unreg_pdev:
-       platform_device_unregister(pdev);
-err_unreg_fe:
-       tpm_fe_unregister_receiver();
-
-err_exit:
+
+       tp = tpm_private_get();
+       if (!tp) {
+               rc = -ENOMEM;
+               goto failexit;
+       }
+
+       tvd.tpm_private = tp;
+       rc = init_vtpm(&tvd);
+       if (rc)
+               goto init_vtpm_failed;
+
+       IPRINTK("Initialising the vTPM driver.\n");
+       if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
+                                            &gref_head ) < 0) {
+               rc = -EFAULT;
+               goto gnttab_alloc_failed;
+       }
+
+       init_tpm_xenbus();
+       return 0;
+
+gnttab_alloc_failed:
+       cleanup_vtpm();
+init_vtpm_failed:
+       tpm_private_put();
+failexit:
+
        return rc;
 }
 
-static void __exit cleanup_xen(void)
-{
-       struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
-       if (chip) {
-               tpm_remove_hardware(chip->dev);
-               platform_device_unregister(pdev);
-               tpm_fe_unregister_receiver();
-       }
-}
-
-module_init(init_xen);
-module_exit(cleanup_xen);
-
-MODULE_AUTHOR("Stefan Berger (stefanb@xxxxxxxxxx)");
-MODULE_DESCRIPTION("TPM Driver for XEN (shared memory)");
-MODULE_VERSION("1.0");
-MODULE_LICENSE("GPL");
+
+static void __exit tpmif_exit(void)
+{
+       cleanup_vtpm();
+       tpm_private_put();
+       exit_tpm_xenbus();
+       gnttab_free_grant_references(gref_head);
+}
+
+module_init(tpmif_init);
+module_exit(tpmif_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
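
The rewritten tpmif_init() above acquires its resources in a fixed order (driver-private state, vTPM registration, grant references, xenbus) and unwinds them in reverse through the labels at the bottom; tpmif_exit() tears the same set down on unload. A minimal sketch of that goto-unwind shape, with hypothetical acquire_a/acquire_b/release_a helpers standing in for the real calls:

    /* Sketch only: acquire_a/acquire_b/release_a are invented names. */
    static int __init example_init(void)
    {
            int rc;

            rc = acquire_a();               /* e.g. get the private state */
            if (rc)
                    return rc;

            rc = acquire_b();               /* e.g. register with the vTPM glue */
            if (rc)
                    goto undo_a;

            return 0;

    undo_a:
            release_a();
            return rc;
    }
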
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/Kconfig
--- a/linux-2.6-xen-sparse/drivers/xen/Kconfig  Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/Kconfig  Thu May 04 14:19:19 2006 +0100
@@ -173,14 +173,6 @@ config XEN_BLKDEV_TAP
          to a character device, allowing device prototyping in application
          space.  Odds are that you want to say N here.
 
-config XEN_TPMDEV_FRONTEND
-       tristate "TPM-device frontend driver"
-       default n
-       select TCG_TPM
-       select TCG_XEN
-       help
-         The TPM-device frontend driver.
-
 config XEN_SCRUB_PAGES
        bool "Scrub memory before freeing it to Xen"
        default y
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/Makefile
--- a/linux-2.6-xen-sparse/drivers/xen/Makefile Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/Makefile Thu May 04 14:19:19 2006 +0100
@@ -16,7 +16,6 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND)     += blk
 obj-$(CONFIG_XEN_BLKDEV_FRONTEND)      += blkfront/
 obj-$(CONFIG_XEN_NETDEV_FRONTEND)      += netfront/
 obj-$(CONFIG_XEN_BLKDEV_TAP)           += blktap/
-obj-$(CONFIG_XEN_TPMDEV_FRONTEND)      += tpmfront/
 obj-$(CONFIG_XEN_PCIDEV_BACKEND)       += pciback/
 obj-$(CONFIG_XEN_PCIDEV_FRONTEND)      += pcifront/
 
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/tpmback/common.h
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/common.h Thu May 04 14:19:19 2006 +0100
@@ -50,6 +50,8 @@ typedef struct tpmif_st {
        grant_handle_t shmem_handle;
        grant_ref_t shmem_ref;
        struct page *pagerange;
+
+       char devname[20];
 } tpmif_t;
 
 void tpmif_disconnect_complete(tpmif_t * tpmif);
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c      Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/interface.c      Thu May 04 14:19:19 2006 +0100
@@ -32,6 +32,7 @@ static tpmif_t *alloc_tpmif(domid_t domi
        tpmif->domid = domid;
        tpmif->status = DISCONNECTED;
        tpmif->tpm_instance = instance;
+       snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
        atomic_set(&tpmif->refcnt, 1);
 
        tpmif->pagerange = balloon_alloc_empty_page_range(TPMIF_TX_RING_SIZE);
@@ -144,7 +145,7 @@ int tpmif_map(tpmif_t *tpmif, unsigned l
        tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
 
        tpmif->irq = bind_evtchn_to_irqhandler(
-               tpmif->evtchn, tpmif_be_int, 0, "tpmif-backend", tpmif);
+               tpmif->evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
        tpmif->shmem_ref = shared_page;
        tpmif->active = 1;
 
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c        Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/tpmback/tpmback.c        Thu May 04 14:19:19 2006 +0100
@@ -28,7 +28,8 @@ struct data_exchange {
        struct list_head pending_pak;
        struct list_head current_pak;
        unsigned int copied_so_far;
-       u8 has_opener;
+       u8 has_opener:1;
+       u8 aborted:1;
        rwlock_t pak_lock;      // protects all of the previous fields
        wait_queue_head_t wait_queue;
 };
@@ -101,6 +102,16 @@ static inline int copy_to_buffer(void *t
        return 0;
 }
 
+
+static void dataex_init(struct data_exchange *dataex)
+{
+       INIT_LIST_HEAD(&dataex->pending_pak);
+       INIT_LIST_HEAD(&dataex->current_pak);
+       dataex->has_opener = 0;
+       rwlock_init(&dataex->pak_lock);
+       init_waitqueue_head(&dataex->wait_queue);
+}
+
 /***************************************************************
  Packet-related functions
 ***************************************************************/
@@ -148,11 +159,12 @@ static struct packet *packet_alloc(tpmif
                                   u32 size, u8 req_tag, u8 flags)
 {
        struct packet *pak = NULL;
-       pak = kzalloc(sizeof (struct packet), GFP_KERNEL);
+       pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
        if (NULL != pak) {
                if (tpmif) {
                        pak->tpmif = tpmif;
                        pak->tpm_instance = tpmif->tpm_instance;
+                       tpmif_get(tpmif);
                }
                pak->data_len = size;
                pak->req_tag = req_tag;
@@ -180,6 +192,9 @@ static void packet_free(struct packet *p
        if (timer_pending(&pak->processing_timer)) {
                BUG();
        }
+
+       if (pak->tpmif)
+               tpmif_put(pak->tpmif);
        kfree(pak->data_buffer);
        /*
         * cannot do tpmif_put(pak->tpmif); bad things happen
@@ -271,7 +286,6 @@ int _packet_write(struct packet *pak,
                struct gnttab_map_grant_ref map_op;
                struct gnttab_unmap_grant_ref unmap_op;
                tpmif_tx_request_t *tx;
-               unsigned long pfn, mfn, mfn_orig;
 
                tx = &tpmif->tx->ring[i].req;
 
@@ -294,12 +308,6 @@ int _packet_write(struct packet *pak,
                        DPRINTK(" Grant table operation failure !\n");
                        return 0;
                }
-
-               pfn = __pa(MMAP_VADDR(tpmif, i)) >> PAGE_SHIFT;
-               mfn = FOREIGN_FRAME(map_op.dev_bus_addr >> PAGE_SHIFT);
-               mfn_orig = pfn_to_mfn(pfn);
-
-               set_phys_to_machine(pfn, mfn);
 
                tocopy = MIN(size - offset, PAGE_SIZE);
 
@@ -310,8 +318,6 @@ int _packet_write(struct packet *pak,
                        return -EFAULT;
                }
                tx->size = tocopy;
-
-               set_phys_to_machine(pfn, mfn_orig);
 
                gnttab_set_unmap_op(&unmap_op, MMAP_VADDR(tpmif, i),
                                    GNTMAP_host_map, handle);
@@ -514,27 +520,41 @@ static ssize_t vtpm_op_read(struct file 
        unsigned long flags;
 
        write_lock_irqsave(&dataex.pak_lock, flags);
+       if (dataex.aborted) {
+               dataex.aborted = 0;
+               dataex.copied_so_far = 0;
+               write_unlock_irqrestore(&dataex.pak_lock, flags);
+               return -EIO;
+       }
 
        if (list_empty(&dataex.pending_pak)) {
                write_unlock_irqrestore(&dataex.pak_lock, flags);
                wait_event_interruptible(dataex.wait_queue,
                                         !list_empty(&dataex.pending_pak));
                write_lock_irqsave(&dataex.pak_lock, flags);
+               dataex.copied_so_far = 0;
        }
 
        if (!list_empty(&dataex.pending_pak)) {
                unsigned int left;
+
                pak = list_entry(dataex.pending_pak.next, struct packet, next);
-
                left = pak->data_len - dataex.copied_so_far;
+               list_del(&pak->next);
+               write_unlock_irqrestore(&dataex.pak_lock, flags);
 
                DPRINTK("size given by app: %d, available: %d\n", size, left);
 
                ret_size = MIN(size, left);
 
                ret_size = packet_read(pak, ret_size, data, size, 1);
+
+               write_lock_irqsave(&dataex.pak_lock, flags);
+
                if (ret_size < 0) {
-                       ret_size = -EFAULT;
+                       del_singleshot_timer_sync(&pak->processing_timer);
+                       packet_free(pak);
+                       dataex.copied_so_far = 0;
                } else {
                        DPRINTK("Copied %d bytes to user buffer\n", ret_size);
 
@@ -545,7 +565,6 @@ static ssize_t vtpm_op_read(struct file 
 
                                del_singleshot_timer_sync(&pak->
                                                          processing_timer);
-                               list_del(&pak->next);
                                list_add_tail(&pak->next, &dataex.current_pak);
                                /*
                                 * The more fontends that are handled at the same time,
@@ -554,6 +573,8 @@ static ssize_t vtpm_op_read(struct file 
                                mod_timer(&pak->processing_timer,
                                          jiffies + (num_frontends * 60 * HZ));
                                dataex.copied_so_far = 0;
+                       } else {
+                               list_add(&pak->next, &dataex.pending_pak);
                        }
                }
        }
@@ -601,8 +622,8 @@ static ssize_t vtpm_op_write(struct file
 
        if (pak == NULL) {
                write_unlock_irqrestore(&dataex.pak_lock, flags);
-               printk(KERN_ALERT "No associated packet! (inst=%d)\n",
-                      ntohl(vrh.instance_no));
+               DPRINTK(KERN_ALERT "No associated packet! (inst=%d)\n",
+                       ntohl(vrh.instance_no));
                return -EFAULT;
        }
 
@@ -784,15 +805,17 @@ static int tpm_send_fail_message(struct 
        return rc;
 }
 
-static void _vtpm_release_packets(struct list_head *head,
-                                 tpmif_t * tpmif, int send_msgs)
-{
+static int _vtpm_release_packets(struct list_head *head,
+                                tpmif_t * tpmif, int send_msgs)
+{
+       int aborted = 0;
+       int c = 0;
        struct packet *pak;
-       struct list_head *pos,
-                *tmp;
+       struct list_head *pos, *tmp;
 
        list_for_each_safe(pos, tmp, head) {
                pak = list_entry(pos, struct packet, next);
+               c += 1;
 
                if (tpmif == NULL || pak->tpmif == tpmif) {
                        int can_send = 0;
@@ -808,8 +831,11 @@ static void _vtpm_release_packets(struct
                                tpm_send_fail_message(pak, pak->req_tag);
                        }
                        packet_free(pak);
-               }
-       }
+                       if (c == 1)
+                               aborted = 1;
+               }
+       }
+       return aborted;
 }
 
 int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
@@ -818,7 +844,9 @@ int vtpm_release_packets(tpmif_t * tpmif
 
        write_lock_irqsave(&dataex.pak_lock, flags);
 
-       _vtpm_release_packets(&dataex.pending_pak, tpmif, send_msgs);
+       dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
+                                              tpmif,
+                                              send_msgs);
        _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
 
        write_unlock_irqrestore(&dataex.pak_lock, flags);
@@ -1020,11 +1048,7 @@ static int __init tpmback_init(void)
                return rc;
        }
 
-       INIT_LIST_HEAD(&dataex.pending_pak);
-       INIT_LIST_HEAD(&dataex.current_pak);
-       dataex.has_opener = 0;
-       rwlock_init(&dataex.pak_lock);
-       init_waitqueue_head(&dataex.wait_queue);
+       dataex_init(&dataex);
 
        spin_lock_init(&tpm_schedule_list_lock);
        INIT_LIST_HEAD(&tpm_schedule_list);
@@ -1041,6 +1065,7 @@ module_init(tpmback_init);
 
 static void __exit tpmback_exit(void)
 {
+       vtpm_release_packets(NULL, 0);
        tpmif_xenbus_exit();
        tpmif_interface_exit();
        misc_deregister(&vtpms_miscdevice);
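
The vtpm_op_read() changes above move to a claim-then-copy scheme: the packet is unlinked from pending_pak under pak_lock, the lock is dropped for the (possibly sleeping) copy to user space, and the lock is retaken to decide the packet's fate, freed on a copy error, moved to current_pak when fully delivered, or put back at the head of pending_pak after a partial copy; the new aborted flag lets a concurrent vtpm_release_packets() of the head packet surface to the reader as -EIO. A condensed sketch of the locking pattern, where the pkt type, the lists and copy_out()/free_pkt() are hypothetical stand-ins for the tpmback structures:

    static ssize_t deliver_one(struct pkt *p, char __user *buf, size_t size,
                               rwlock_t *lock, struct list_head *pending,
                               struct list_head *in_flight)
    {
            ssize_t n;
            unsigned long flags;

            write_lock_irqsave(lock, flags);
            list_del(&p->next);                     /* claim the packet */
            write_unlock_irqrestore(lock, flags);

            n = copy_out(p, buf, size);             /* may sleep; lock not held */

            write_lock_irqsave(lock, flags);
            if (n < 0)
                    free_pkt(p);                    /* give up on a copy error */
            else if ((size_t)n == p->len)
                    list_add_tail(&p->next, in_flight);
            else
                    list_add(&p->next, pending);    /* partial: retry later */
            write_unlock_irqrestore(lock, flags);

            return n;
    }
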
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c
--- a/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/xenbus/xenbus_xs.c       Thu May 04 14:19:19 2006 +0100
@@ -183,7 +183,7 @@ void *xenbus_dev_request_and_reply(struc
 
        mutex_unlock(&xs_state.request_mutex);
 
-       if ((msg->type == XS_TRANSACTION_END) ||
+       if ((req_msg.type == XS_TRANSACTION_END) ||
            ((req_msg.type == XS_TRANSACTION_START) &&
             (msg->type == XS_ERROR)))
                up_read(&xs_state.suspend_mutex);
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h
--- a/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h        Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/include/asm-ia64/hypervisor.h        Thu May 04 14:19:19 2006 +0100
@@ -134,7 +134,6 @@ HYPERVISOR_poll(
 #define        pte_mfn(_x)     pte_pfn(_x)
 #define __pte_ma(_x)   ((pte_t) {(_x)})
 #define phys_to_machine_mapping_valid(_x)      (1)
-#define        kmap_flush_unused()     do {} while (0)
 #define pfn_pte_ma(_x,_y)      __pte_ma(0)
 #ifndef CONFIG_XEN_IA64_DOM0_VP //XXX
 #define set_phys_to_machine(_x,_y)     do {} while (0)
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/mm/Kconfig
--- a/linux-2.6-xen-sparse/mm/Kconfig   Tue May 02 18:17:59 2006 +0100
+++ b/linux-2.6-xen-sparse/mm/Kconfig   Thu May 04 14:19:19 2006 +0100
@@ -126,14 +126,14 @@ comment "Memory hotplug is currently inc
 # Default to 4 for wider testing, though 8 might be more appropriate.
 # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
-# XEN uses the mapping field on pagetable pages to store a pointer to
-# the destructor.
+# XEN on x86 architecture uses the mapping field on pagetable pages to store a
+# pointer to the destructor. This conflicts with pte_lock_deinit().
 #
 config SPLIT_PTLOCK_CPUS
        int
        default "4096" if ARM && !CPU_CACHE_VIPT
        default "4096" if PARISC && !PA20
-       default "4096" if XEN
+       default "4096" if X86_XEN || X86_64_XEN
        default "4"
 
 #
diff -r 55f73916d319 -r 51484df99be1 tools/python/xen/xend/XendBootloader.py
--- a/tools/python/xen/xend/XendBootloader.py   Tue May 02 18:17:59 2006 +0100
+++ b/tools/python/xen/xend/XendBootloader.py   Thu May 04 14:19:19 2006 +0100
@@ -19,13 +19,13 @@ from XendLogging import log
 from XendLogging import log
 from XendError import VmError
 
-def bootloader(blexec, disk, quiet = 0, entry = None):
+def bootloader(blexec, disk, quiet = 0, blargs = None):
     """Run the boot loader executable on the given disk and return a
     config image.
     @param blexec  Binary to use as the boot loader
     @param disk Disk to run the boot loader on.
     @param quiet Run in non-interactive mode, just booting the default.
-    @param entry Default entry to boot."""
+    @param blargs Arguments to pass to the bootloader."""
     
     if not os.access(blexec, os.X_OK):
         msg = "Bootloader isn't executable"
@@ -48,8 +48,8 @@ def bootloader(blexec, disk, quiet = 0, 
         if quiet:
             args.append("-q")
         args.append("--output=%s" %(fifo,))
-        if entry is not None:
-            args.append("--entry=%s" %(entry,))
+        if blargs is not None:
+            args.extend(blargs.split())
         args.append(disk)
 
         try:
diff -r 55f73916d319 -r 51484df99be1 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Tue May 02 18:17:59 2006 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py   Thu May 04 14:19:19 2006 +0100
@@ -132,6 +132,7 @@ ROUNDTRIPPING_CONFIG_ENTRIES = [
     ('memory',     int),
     ('maxmem',     int),
     ('bootloader', str),
+    ('bootloader_args', str),
     ('features', str),
     ]
 
@@ -571,6 +572,7 @@ class XendDomainInfo:
             defaultInfo('memory',       lambda: 0)
             defaultInfo('maxmem',       lambda: 0)
             defaultInfo('bootloader',   lambda: None)
+            defaultInfo('bootloader_args', lambda: None)            
             defaultInfo('backend',      lambda: [])
             defaultInfo('device',       lambda: [])
             defaultInfo('image',        lambda: None)
@@ -1630,7 +1632,8 @@ class XendDomainInfo:
             if disk is None:
                 continue
             fn = blkdev_uname_to_file(disk)
-            blcfg = bootloader(self.info['bootloader'], fn, 1)
+            blcfg = bootloader(self.info['bootloader'], fn, 1,
+                               self.info['bootloader_args'])
             break
         if blcfg is None:
             msg = "Had a bootloader specified, but can't find disk"
diff -r 55f73916d319 -r 51484df99be1 tools/python/xen/xm/create.py
--- a/tools/python/xen/xm/create.py     Tue May 02 18:17:59 2006 +0100
+++ b/tools/python/xen/xm/create.py     Thu May 04 14:19:19 2006 +0100
@@ -122,9 +122,13 @@ gopts.var('bootloader', val='FILE',
           fn=set_value, default=None,
           use="Path to bootloader.")
 
+gopts.var('bootargs', val='NAME',
+          fn=set_value, default=None,
+          use="Arguments to pass to boot loader")
+
 gopts.var('bootentry', val='NAME',
           fn=set_value, default=None,
-          use="Entry to boot via boot loader")
+          use="DEPRECATED.  Entry to boot via boot loader.  Use bootargs.")
 
 gopts.var('kernel', val='FILE',
           fn=set_value, default=None,
@@ -620,8 +624,13 @@ def run_bootloader(vals):
     (uname, dev, mode, backend) = vals.disk[0]
     file = blkif.blkdev_uname_to_file(uname)
 
+    if vals.bootentry:
+        warn("The bootentry option is deprecated.  Use bootargs and pass "
+             "--entry= directly.")
+        vals.bootargs = "--entry=%s" %(vals.bootentry,)
+
     return bootloader(vals.bootloader, file, not vals.console_autoconnect,
-                      vals.bootentry)
+                      vals.bootargs)
 
 def make_config(vals):
     """Create the domain configuration.
@@ -654,8 +663,10 @@ def make_config(vals):
         config.append(['backend', ['tpmif']])
 
     if vals.bootloader:
+        config_image = run_bootloader(vals)
         config.append(['bootloader', vals.bootloader])
-        config_image = run_bootloader(vals)
+        if vals.bootargs:
+            config.append(['bootloader_args', vals.bootargs])
     else:
         config_image = configure_image(vals)
     config.append(['image', config_image])
diff -r 55f73916d319 -r 51484df99be1 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Tue May 02 18:17:59 2006 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu May 04 14:19:19 2006 +0100
@@ -79,6 +79,8 @@ void svm_dump_regs(const char *from, str
 
 static void svm_relinquish_guest_resources(struct domain *d);
 
+/* Host save area */
+struct host_save_area *host_save_area[ NR_CPUS ] = {0};
 static struct asid_pool ASIDpool[NR_CPUS];
 
 /*
@@ -185,11 +187,16 @@ void stop_svm(void)
 void stop_svm(void)
 {
     u32 eax, edx;    
+    int cpu = smp_processor_id();
 
     /* We turn off the EFER_SVME bit. */
     rdmsr(MSR_EFER, eax, edx);
     eax &= ~EFER_SVME;
     wrmsr(MSR_EFER, eax, edx);
+ 
+    /* release the HSA */
+    free_host_save_area( host_save_area[ cpu ] );
+    host_save_area[ cpu ] = NULL;
 
     printk("AMD SVM Extension is disabled.\n");
 }
@@ -431,8 +438,11 @@ int start_svm(void)
 int start_svm(void)
 {
     u32 eax, ecx, edx;
-    
-    /* Xen does not fill x86_capability words except 0. */
+    u32 phys_hsa_lo, phys_hsa_hi;   
+    u64 phys_hsa;
+    int cpu = smp_processor_id();
+ 
+   /* Xen does not fill x86_capability words except 0. */
     ecx = cpuid_ecx(0x80000001);
     boot_cpu_data.x86_capability[5] = ecx;
     
@@ -443,7 +453,14 @@ int start_svm(void)
     eax |= EFER_SVME;
     wrmsr(MSR_EFER, eax, edx);
     asidpool_init(smp_processor_id());    
-    printk("AMD SVM Extension is enabled for cpu %d.\n", smp_processor_id());
+    printk("AMD SVM Extension is enabled for cpu %d.\n", cpu );
+
+    /* Initialize the HSA for this core */
+    host_save_area[ cpu ] = alloc_host_save_area();
+    phys_hsa = (u64) virt_to_maddr( host_save_area[ cpu ] ); 
+    phys_hsa_lo = (u32) phys_hsa;
+    phys_hsa_hi = (u32) (phys_hsa >> 32);    
+    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
     
     /* Setup HVM interfaces */
     hvm_funcs.disable = stop_svm;
@@ -546,20 +563,6 @@ void save_svm_cpu_user_regs(struct vcpu 
     ctxt->ds = vmcb->ds.sel;
 }
 
-#if defined (__x86_64__)
-void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v )
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    regs->rip    = vmcb->rip;
-    regs->rsp    = vmcb->rsp;
-    regs->rflags = vmcb->rflags;
-    regs->cs     = vmcb->cs.sel;
-    regs->ds     = vmcb->ds.sel;
-    regs->es     = vmcb->es.sel;
-    regs->ss     = vmcb->ss.sel;
-}
-#elif defined (__i386__)
 void svm_store_cpu_user_regs(struct cpu_user_regs *regs, struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -571,11 +574,11 @@ void svm_store_cpu_user_regs(struct cpu_
     regs->ds     = vmcb->ds.sel;
     regs->es     = vmcb->es.sel;
     regs->ss     = vmcb->ss.sel;
-}
-#endif
+    regs->fs     = vmcb->fs.sel;
+    regs->gs     = vmcb->gs.sel;
+}
 
 /* XXX Use svm_load_cpu_guest_regs instead */
-#if defined (__i386__)
 void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
 { 
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
@@ -588,30 +591,17 @@ void svm_load_cpu_user_regs(struct vcpu 
     vmcb->rflags   = regs->eflags;
     vmcb->cs.sel   = regs->cs;
     vmcb->rip      = regs->eip;
+
+    vmcb->ds.sel   = regs->ds;
+    vmcb->es.sel   = regs->es;
+    vmcb->fs.sel   = regs->fs;
+    vmcb->gs.sel   = regs->gs;
+
     if (regs->eflags & EF_TF)
         *intercepts |= EXCEPTION_BITMAP_DB;
     else
         *intercepts &= ~EXCEPTION_BITMAP_DB;
 }
-#else /* (__i386__) */
-void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    u32 *intercepts = &v->arch.hvm_svm.vmcb->exception_intercepts;
-    
-    /* Write the guest register value into VMCB */
-    vmcb->rax      = regs->rax;
-    vmcb->ss.sel   = regs->ss;
-    vmcb->rsp      = regs->rsp;   
-    vmcb->rflags   = regs->rflags;
-    vmcb->cs.sel   = regs->cs;
-    vmcb->rip      = regs->rip;
-    if (regs->rflags & EF_TF)
-        *intercepts |= EXCEPTION_BITMAP_DB;
-    else
-        *intercepts &= ~EXCEPTION_BITMAP_DB;
-}
-#endif /* !(__i386__) */
 
 int svm_paging_enabled(struct vcpu *v)
 {
@@ -735,10 +725,6 @@ static void svm_relinquish_guest_resourc
     {
         if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
             continue;
-#if 0
-        /* Memory leak by not freeing this. XXXKAF: *Why* is not per core?? */
-        free_host_save_area(v->arch.hvm_svm.host_save_area);
-#endif
 
         destroy_vmcb(&v->arch.hvm_svm);
         free_monitor_pagetable(v);
diff -r 55f73916d319 -r 51484df99be1 xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Tue May 02 18:17:59 2006 +0100
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Thu May 04 14:19:19 2006 +0100
@@ -36,9 +36,11 @@
 #include <xen/kernel.h>
 #include <xen/domain_page.h>
 
+extern struct host_save_area *host_save_area[];
 extern int svm_dbg_on;
 extern int asidpool_assign_next( struct vmcb_struct *vmcb, int retire_current,
                                   int oldcore, int newcore);
+extern void set_hsa_to_guest( struct arch_svm_struct *arch_svm );
 
 #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */
 
@@ -309,8 +311,6 @@ int construct_vmcb(struct arch_svm_struc
 {
     int error;
     long rc=0;
-    struct host_save_area *hsa = NULL;
-    u64 phys_hsa;
 
     memset(arch_svm, 0, sizeof(struct arch_svm_struct));
 
@@ -320,36 +320,9 @@ int construct_vmcb(struct arch_svm_struc
         goto err_out;
     }
 
-    /* 
-     * The following code is for allocating host_save_area.
-     * Note: We either allocate a Host Save Area per core or per VCPU. 
-     * However, we do not want a global data structure 
-     * for HSA per core, we decided to implement a HSA for each VCPU. 
-     * It will waste space since VCPU number is larger than core number. 
-     * But before we find a better place for HSA for each core, we will 
-     * stay will this solution.
-     */
-
-    if (!(hsa = alloc_host_save_area())) 
-    {
-        printk("Failed to allocate Host Save Area\n");
-        rc = -ENOMEM;
-        goto err_out;
-    }
-
-    phys_hsa = (u64) virt_to_maddr(hsa);
-    arch_svm->host_save_area = hsa;
-    arch_svm->host_save_pa   = phys_hsa;
-
+    /* update the HSA for the current Core */
+    set_hsa_to_guest( arch_svm );
     arch_svm->vmcb_pa  = (u64) virt_to_maddr(arch_svm->vmcb);
-
-    if ((error = load_vmcb(arch_svm, arch_svm->host_save_pa))) 
-    {
-        printk("construct_vmcb: load_vmcb failed: VMCB = %lx\n",
-               (unsigned long) arch_svm->host_save_pa);
-        rc = -EINVAL;         
-        goto err_out;
-    }
 
     if ((error = construct_vmcb_controls(arch_svm))) 
     {
@@ -458,18 +431,11 @@ void svm_do_launch(struct vcpu *v)
 }
 
 
-int load_vmcb(struct arch_svm_struct *arch_svm, u64 phys_hsa) 
-{
-    u32 phys_hsa_lo, phys_hsa_hi;
-    
-    phys_hsa_lo = (u32) phys_hsa;
-    phys_hsa_hi = (u32) (phys_hsa >> 32);
-    
-    wrmsr(MSR_K8_VM_HSAVE_PA, phys_hsa_lo, phys_hsa_hi);
-    set_bit(ARCH_SVM_VMCB_LOADED, &arch_svm->flags); 
-    return 0;
-}
-
+void set_hsa_to_guest( struct arch_svm_struct *arch_svm ) 
+{
+    arch_svm->host_save_area = host_save_area[ smp_processor_id() ];
+    arch_svm->host_save_pa   = (u64)virt_to_maddr( arch_svm->host_save_area );
+}
 
 /* 
  * Resume the guest.
@@ -481,6 +447,9 @@ void svm_do_resume(struct vcpu *v)
     struct hvm_time_info *time_info = &vpit->time_info;
 
     svm_stts(v);
+
+    /* make sure the HSA is set for the current core */
+    set_hsa_to_guest( &v->arch.hvm_svm );
     
     /* pick up the elapsed PIT ticks and re-enable pit_timer */
     if ( time_info->first_injected ) {
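
The SVM hunks above switch from one host save area per VCPU to one per physical core: start_svm() allocates the HSA for its CPU, stores it in host_save_area[], and programs its machine address into MSR_K8_VM_HSAVE_PA, while construct_vmcb() and svm_do_resume() call set_hsa_to_guest() so a VCPU always points at the HSA of the core it is currently running on. The MSR write takes the 64-bit physical address as two 32-bit halves; a small hedged helper making that split explicit (hypothetical name, assuming Xen's wrmsr(msr, lo, hi) macro):

    /* Hypothetical helper: program a 64-bit physical address into an MSR
     * through Xen's wrmsr(msr, lo, hi), as start_svm() does for the HSA. */
    static inline void wrmsr_phys64(unsigned int msr, u64 pa)
    {
            wrmsr(msr, (u32)pa, (u32)(pa >> 32));
    }
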
diff -r 55f73916d319 -r 51484df99be1 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Tue May 02 18:17:59 2006 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu May 04 14:19:19 2006 +0100
@@ -475,6 +475,45 @@ static void vmx_store_cpu_guest_regs(
         __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
 }
 
+/*
+ * The VMX spec (section 4.3.1.2, Checks on Guest Segment
+ * Registers) says that virtual-8086 mode guests' segment
+ * base-address fields in the VMCS must be equal to their
+ * corresponding segment selector field shifted right by
+ * four bits upon vmentry.
+ *
+ * This function (called only for VM86-mode guests) fixes
+ * the bases to be consistent with the selectors in regs
+ * if they're not already.  Without this, we can fail the
+ * vmentry check mentioned above.
+ */
+static void fixup_vm86_seg_bases(struct cpu_user_regs *regs)
+{
+    int err = 0;
+    unsigned long base;
+
+    err |= __vmread(GUEST_ES_BASE, &base);
+    if (regs->es << 4 != base)
+        err |= __vmwrite(GUEST_ES_BASE, regs->es << 4);
+    err |= __vmread(GUEST_CS_BASE, &base);
+    if (regs->cs << 4 != base)
+        err |= __vmwrite(GUEST_CS_BASE, regs->cs << 4);
+    err |= __vmread(GUEST_SS_BASE, &base);
+    if (regs->ss << 4 != base)
+        err |= __vmwrite(GUEST_SS_BASE, regs->ss << 4);
+    err |= __vmread(GUEST_DS_BASE, &base);
+    if (regs->ds << 4 != base)
+        err |= __vmwrite(GUEST_DS_BASE, regs->ds << 4);
+    err |= __vmread(GUEST_FS_BASE, &base);
+    if (regs->fs << 4 != base)
+        err |= __vmwrite(GUEST_FS_BASE, regs->fs << 4);
+    err |= __vmread(GUEST_GS_BASE, &base);
+    if (regs->gs << 4 != base)
+        err |= __vmwrite(GUEST_GS_BASE, regs->gs << 4);
+
+    BUG_ON(err);
+}
+
 void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
 {
     if ( v != current )
@@ -511,6 +550,8 @@ void vmx_load_cpu_guest_regs(struct vcpu
         __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
     else
         __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
+    if (regs->eflags & EF_VM)
+        fixup_vm86_seg_bases(regs);
 
     __vmwrite(GUEST_CS_SELECTOR, regs->cs);
     __vmwrite(GUEST_RIP, regs->eip);
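
fixup_vm86_seg_bases() above enforces the vmentry rule it cites: while EFLAGS.VM is set, each guest segment base in the VMCS must equal the corresponding selector shifted left by four bits, so selector 0x1234 requires base 0x12340. The invariant in isolation (hypothetical helper name):

    /* Hypothetical check: a virtual-8086 selector/base pair is acceptable
     * for vmentry when base == selector << 4, e.g. 0x1234 -> 0x12340. */
    static inline int vm86_base_matches(unsigned int sel, unsigned long base)
    {
            return base == ((unsigned long)sel << 4);
    }
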
diff -r 55f73916d319 -r 51484df99be1 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Tue May 02 18:17:59 2006 +0100
+++ b/xen/arch/x86/x86_32/mm.c  Thu May 04 14:19:19 2006 +0100
@@ -155,17 +155,10 @@ void subarch_init_memory(void)
      * 64-bit operations on them. Also, just for sanity, we assert the size
      * of the structure here.
      */
-    if ( (offsetof(struct page_info, u.inuse._domain) != 
-          (offsetof(struct page_info, count_info) + sizeof(u32))) ||
-         ((offsetof(struct page_info, count_info) & 7) != 0) ||
-         (sizeof(struct page_info) != 24) )
-    {
-        printk("Weird page_info layout (%ld,%ld,%d)\n",
-               offsetof(struct page_info, count_info),
-               offsetof(struct page_info, u.inuse._domain),
-               sizeof(struct page_info));
-        BUG();
-    }
+    BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
+                 (offsetof(struct page_info, count_info) + sizeof(u32)));
+    BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
+    BUILD_BUG_ON(sizeof(struct page_info) != 24);
 
     /* M2P table is mappable read-only by privileged domains. */
     for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
diff -r 55f73916d319 -r 51484df99be1 xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Tue May 02 18:17:59 2006 +0100
+++ b/xen/arch/x86/x86_64/mm.c  Thu May 04 14:19:19 2006 +0100
@@ -145,19 +145,11 @@ void subarch_init_memory(void)
      * count_info and domain fields must be adjacent, as we perform atomic
      * 64-bit operations on them.
      */
-    if ( ((offsetof(struct page_info, u.inuse._domain) != 
-           (offsetof(struct page_info, count_info) + sizeof(u32)))) ||
-         ((offsetof(struct page_info, count_info) & 7) != 0) ||
-         (sizeof(struct page_info) !=
-          (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long))) )
-    {
-        printk("Weird page_info layout (%ld,%ld,%ld,%ld)\n",
-               offsetof(struct page_info, count_info),
-               offsetof(struct page_info, u.inuse._domain),
-               sizeof(struct page_info),
-               32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long));
-        for ( ; ; ) ;
-    }
+    BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) != 
+                 (offsetof(struct page_info, count_info) + sizeof(u32)));
+    BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
+    BUILD_BUG_ON(sizeof(struct page_info) !=
+                 (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));
 
     /* M2P table is mappable read-only by privileged domains. */
     for ( v  = RDWR_MPT_VIRT_START; 
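
Both subarch_init_memory() hunks above replace runtime layout checks (a printk() followed by BUG() or an infinite loop) with BUILD_BUG_ON(), so a wrong struct page_info layout now breaks the build rather than the boot. A self-contained sketch of the idea, using a hypothetical MY_BUILD_BUG_ON rather than the real macro from Xen's headers:

    #include <stddef.h>

    /* Sketch: the array size goes negative, and compilation fails,
     * whenever 'cond' is true. */
    #define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct layout { unsigned int count_info; unsigned int domain; };

    int main(void)
    {
            /* passes: 'domain' really does follow 'count_info' */
            MY_BUILD_BUG_ON(offsetof(struct layout, domain) !=
                            offsetof(struct layout, count_info) + sizeof(unsigned int));
            return 0;
    }
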
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.c  Thu May 04 14:19:19 2006 +0100
@@ -0,0 +1,546 @@
+/*
+ * Copyright (C) 2006 IBM Corporation
+ *
+ * Authors:
+ * Stefan Berger <stefanb@xxxxxxxxxx>
+ *
+ * Generic device driver part for device drivers in a virtualized
+ * environment.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <asm/uaccess.h>
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include "tpm.h"
+#include "tpm_vtpm.h"
+
+/* read status bits */
+enum {
+       STATUS_BUSY = 0x01,
+       STATUS_DATA_AVAIL = 0x02,
+       STATUS_READY = 0x04
+};
+
+#define MIN(x,y)  ((x) < (y)) ? (x) : (y)
+
+struct transmission {
+       struct list_head next;
+
+       unsigned char *request;
+       size_t  request_len;
+       size_t  request_buflen;
+
+       unsigned char *response;
+       size_t  response_len;
+       size_t  response_buflen;
+
+       unsigned int flags;
+};
+
+enum {
+       TRANSMISSION_FLAG_WAS_QUEUED = 0x1
+};
+
+struct vtpm_state {
+       struct transmission *current_request;
+       spinlock_t           req_list_lock;
+       wait_queue_head_t    req_wait_queue;
+
+       struct list_head     queued_requests;
+
+       struct transmission *current_response;
+       spinlock_t           resp_list_lock;
+       wait_queue_head_t    resp_wait_queue;     // processes waiting for responses
+
+       struct transmission *req_cancelled;       // if a cancellation was encounterd
+
+       u8                   vd_status;
+       u8                   flags;
+
+       unsigned long        disconnect_time;
+
+       struct tpm_virtual_device *tpmvd;
+};
+
+enum {
+       DATAEX_FLAG_QUEUED_ONLY = 0x1
+};
+
+
+/* local variables */
+static struct vtpm_state *vtpms;
+
+/* local function prototypes */
+static int _vtpm_send_queued(struct tpm_chip *chip);
+
+
+/* =============================================================
+ * Some utility functions
+ * =============================================================
+ */
+static void vtpm_state_init(struct vtpm_state *vtpms)
+{
+       vtpms->current_request = NULL;
+       spin_lock_init(&vtpms->req_list_lock);
+       init_waitqueue_head(&vtpms->req_wait_queue);
+       INIT_LIST_HEAD(&vtpms->queued_requests);
+
+       vtpms->current_response = NULL;
+       spin_lock_init(&vtpms->resp_list_lock);
+       init_waitqueue_head(&vtpms->resp_wait_queue);
+
+       vtpms->disconnect_time = jiffies;
+}
+
+
+static inline struct transmission *transmission_alloc(void)
+{
+       return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
+}
+
+static unsigned char *
+transmission_set_req_buffer(struct transmission *t,
+                            unsigned char *buffer, size_t len)
+{
+       if (t->request_buflen < len) {
+               kfree(t->request);
+               t->request = kmalloc(len, GFP_KERNEL);
+               if (!t->request) {
+                       t->request_buflen = 0;
+                       return NULL;
+               }
+               t->request_buflen = len;
+       }
+
+       memcpy(t->request, buffer, len);
+       t->request_len = len;
+
+       return t->request;
+}
+
+static unsigned char *
+transmission_set_res_buffer(struct transmission *t,
+                            const unsigned char *buffer, size_t len)
+{
+       if (t->response_buflen < len) {
+               kfree(t->response);
+               t->response = kmalloc(len, GFP_ATOMIC);
+               if (!t->response) {
+                       t->response_buflen = 0;
+                       return NULL;
+               }
+               t->response_buflen = len;
+       }
+
+       memcpy(t->response, buffer, len);
+       t->response_len = len;
+
+       return t->response;
+}
+
+static inline void transmission_free(struct transmission *t)
+{
+       kfree(t->request);
+       kfree(t->response);
+       kfree(t);
+}
+
+/* =============================================================
+ * Interface with the lower layer driver
+ * =============================================================
+ */
+/*
+ * Lower layer uses this function to make a response available.
+ */
+int vtpm_vd_recv(const unsigned char *buffer, size_t count, const void *ptr)
+{
+       unsigned long flags;
+       int ret_size = 0;
+       struct transmission *t;
+
+       /*
+        * The list with requests must contain one request
+        * only and the element there must be the one that
+        * was passed to me from the front-end.
+        */
+       spin_lock_irqsave(&vtpms->resp_list_lock, flags);
+       if (vtpms->current_request != ptr) {
+               printk("WARNING: The request pointer is different than the "
+                      "pointer the shared memory driver returned to me. "
+                      "%p != %p\n",
+                      vtpms->current_request, ptr);
+       }
+
+       /*
+        * If the request has been cancelled, just quit here
+        */
+       if (vtpms->req_cancelled == (struct transmission *)ptr) {
+               if (vtpms->current_request == vtpms->req_cancelled) {
+                       vtpms->current_request = NULL;
+               }
+               transmission_free(vtpms->req_cancelled);
+               vtpms->req_cancelled = NULL;
+               spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
+               return 0;
+       }
+
+       if (NULL != (t = vtpms->current_request)) {
+               transmission_free(t);
+               vtpms->current_request = NULL;
+       }
+
+       t = transmission_alloc();
+       if (t) {
+               if (!transmission_set_res_buffer(t, buffer, count)) {
+                       transmission_free(t);
+                       spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
+                       return -ENOMEM;
+               }
+               ret_size = count;
+               vtpms->current_response = t;
+               wake_up_interruptible(&vtpms->resp_wait_queue);
+       }
+       spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
+
+       return ret_size;
+}
+
+
+/*
+ * Lower layer indicates its status (connected/disconnected)
+ */
+void vtpm_vd_status(u8 vd_status)
+{
+       vtpms->vd_status = vd_status;
+       if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
+               vtpms->disconnect_time = jiffies;
+       }
+}
+
+/* =============================================================
+ * Interface with the generic TPM driver
+ * =============================================================
+ */
+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+       int rc = 0;
+       unsigned long flags;
+
+       /*
+        * Check if the previous operation only queued the command
+        * In this case there won't be a response, so I just
+        * return from here and reset that flag. In any other
+        * case I should receive a response from the back-end.
+        */
+       spin_lock_irqsave(&vtpms->resp_list_lock, flags);
+       if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
+               vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
+               spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
+               /*
+                * The first few commands (measurements) must be
+                * queued since it might not be possible to talk to the
+                * TPM, yet.
+                * Return a response of up to 30 '0's.
+                */
+
+               count = MIN(count, 30);
+               memset(buf, 0x0, count);
+               return count;
+       }
+       /*
+        * Check whether something is in the responselist and if
+        * there's nothing in the list wait for something to appear.
+        */
+
+       if (!vtpms->current_response) {
+               spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
+               interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
+                                              1000);
+               spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
+       }
+
+       if (vtpms->current_response) {
+               struct transmission *t = vtpms->current_response;
+               vtpms->current_response = NULL;
+               rc = MIN(count, t->response_len);
+               memcpy(buf, t->response, rc);
+               transmission_free(t);
+       }
+
+       spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
+       return rc;
+}
+
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+       int rc = 0;
+       unsigned long flags;
+       struct transmission *t = transmission_alloc();
+
+       if (!t)
+               return -ENOMEM;
+       /*
+        * If there's a current request, it must be the
+        * previous request that has timed out.
+        */
+       spin_lock_irqsave(&vtpms->req_list_lock, flags);
+       if (vtpms->current_request != NULL) {
+               printk("WARNING: Sending although there is a request outstanding.\n"
+                      "         Previous request must have timed out.\n");
+               transmission_free(vtpms->current_request);
+               vtpms->current_request = NULL;
+       }
+       spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
+
+       /*
+        * Queue the packet if the driver below is not
+        * ready, yet, or there is any packet already
+        * in the queue.
+        * If the driver below is ready, unqueue all
+        * packets first before sending our current
+        * packet.
+        * For each unqueued packet, except for the
+        * last (=current) packet, call the function
+        * tpm_xen_recv to wait for the response to come
+        * back.
+        */
+       if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
+               if (time_after(jiffies,
+                              vtpms->disconnect_time + HZ * 10)) {
+                       rc = -ENOENT;
+               } else {
+                       goto queue_it;
+               }
+       } else {
+               /*
+                * Send all queued packets.
+                */
+               if (_vtpm_send_queued(chip) == 0) {
+
+                       vtpms->current_request = t;
+
+                       rc = vtpm_vd_send(chip,
+                                         vtpms->tpmvd->tpm_private,
+                                         buf,
+                                         count,
+                                         t);
+                       /*
+                        * The generic TPM driver will call
+                        * the function to receive the response.
+                        */
+                       if (rc < 0) {
+                               vtpms->current_request = NULL;
+                               goto queue_it;
+                       }
+               } else {
+queue_it:
+                       if (!transmission_set_req_buffer(t, buf, count)) {
+                               transmission_free(t);
+                               rc = -ENOMEM;
+                               goto exit;
+                       }
+                       /*
+                        * An error occurred. Don't event try
+                        * to send the current request. Just
+                        * queue it.
+                        */
+                       spin_lock_irqsave(&vtpms->req_list_lock, flags);
+                       vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
+                       list_add_tail(&t->next, &vtpms->queued_requests);
+                       spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
+               }
+       }
+
+exit:
+       return rc;
+}
+
+
+/*
+ * Send all queued requests.
+ */
+static int _vtpm_send_queued(struct tpm_chip *chip)
+{
+       int rc;
+       int error = 0;
+       long flags;
+       unsigned char buffer[1];
+
+       spin_lock_irqsave(&vtpms->req_list_lock, flags);
+
+       while (!list_empty(&vtpms->queued_requests)) {
+               /*
+                * Need to dequeue them.
+                * Read the result into a dummy buffer.
+                */
+               struct transmission *qt = (struct transmission *)
+                                         vtpms->queued_requests.next;
+               list_del(&qt->next);
+               vtpms->current_request = qt;
+               spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
+
+               rc = vtpm_vd_send(chip,
+                                 vtpms->tpmvd->tpm_private,
+                                 qt->request,
+                                 qt->request_len,
+                                 qt);
+
+               if (rc < 0) {
+                       spin_lock_irqsave(&vtpms->req_list_lock, flags);
+                       if ((qt = vtpms->current_request) != NULL) {
+                               /*
+                                * requeue it at the beginning
+                                * of the list
+                                */
+                               list_add(&qt->next,
+                                        &vtpms->queued_requests);
+                       }
+                       vtpms->current_request = NULL;
+                       error = 1;
+                       break;
+               }
+               /*
+                * After this point qt is not valid anymore!
+                * It is freed when the front-end is delivering
+                * the data by calling tpm_recv
+                */
+               /*
+                * Receive response into provided dummy buffer
+                */
+               rc = vtpm_recv(chip, buffer, sizeof(buffer));
+               spin_lock_irqsave(&vtpms->req_list_lock, flags);
+       }
+
+       spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
+
+       return error;
+}
+
+static void vtpm_cancel(struct tpm_chip *chip)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&vtpms->resp_list_lock,flags);
+
+       vtpms->req_cancelled = vtpms->current_request;
+
+       spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
+}
+
+static u8 vtpm_status(struct tpm_chip *chip)
+{
+       u8 rc = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vtpms->resp_list_lock, flags);
+       /*
+        * Data are available if:
+        *  - there's a current response
+        *  - the last packet was queued only (this is fake, but necessary to
+        *      get the generic TPM layer to call the receive function.)
+        */
+       if (vtpms->current_response ||
+           0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
+               rc = STATUS_DATA_AVAIL;
+       }
+       spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
+       return rc;
+}
+
+static struct file_operations vtpm_ops = {
+       .owner = THIS_MODULE,
+       .llseek = no_llseek,
+       .open = tpm_open,
+       .read = tpm_read,
+       .write = tpm_write,
+       .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
+
+static struct attribute *vtpm_attrs[] = {
+       &dev_attr_pubek.attr,
+       &dev_attr_pcrs.attr,
+       &dev_attr_caps.attr,
+       &dev_attr_cancel.attr,
+       NULL,
+};
+
+static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
+
+static struct tpm_vendor_specific tpm_vtpm = {
+       .recv = vtpm_recv,
+       .send = vtpm_send,
+       .cancel = vtpm_cancel,
+       .status = vtpm_status,
+       .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
+       .req_complete_val  = STATUS_DATA_AVAIL,
+       .req_canceled = STATUS_READY,
+       .base = 0,
+       .attr_group = &vtpm_attr_grp,
+       .miscdev = {
+               .fops = &vtpm_ops,
+       },
+};
+
+static struct platform_device *pdev;
+
+int __init init_vtpm(struct tpm_virtual_device *tvd)
+{
+       int rc;
+
+       /* vtpms is global - only allow one user */
+       if (vtpms)
+               return -EBUSY;
+
+       vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
+       if (!vtpms)
+               return -ENOMEM;
+
+       vtpm_state_init(vtpms);
+       vtpms->tpmvd = tvd;
+
+       pdev = platform_device_register_simple("tpm_vtpm", -1, NULL, 0);
+       if (IS_ERR(pdev)) {
+               rc = PTR_ERR(pdev);
+               goto err_free_mem;
+       }
+
+       if (tvd)
+               tpm_vtpm.buffersize = tvd->max_tx_size;
+
+       if ((rc = tpm_register_hardware(&pdev->dev, &tpm_vtpm)) < 0) {
+               goto err_unreg_pdev;
+       }
+
+       return 0;
+
+err_unreg_pdev:
+       platform_device_unregister(pdev);
+err_free_mem:
+       kfree(vtpms);
+       vtpms = NULL;
+
+       return rc;
+}
+
+void __exit cleanup_vtpm(void)
+{
+       struct tpm_chip *chip = dev_get_drvdata(&pdev->dev);
+       if (chip) {
+               tpm_remove_hardware(chip->dev);
+               platform_device_unregister(pdev);
+       }
+       kfree(vtpms);
+       vtpms = NULL;
+}
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.h
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/linux-2.6-xen-sparse/drivers/char/tpm/tpm_vtpm.h  Thu May 04 14:19:19 2006 +0100
@@ -0,0 +1,38 @@
+#ifndef TPM_VTPM_H
+#define TPM_VTPM_H
+
+struct tpm_chip;
+struct tpm_private;
+
+struct tpm_virtual_device {
+       /*
+        * This field indicates the maximum size the driver can
+        * transfer in one chunk. It is filled in by the front-end
+        * driver and should be propagated to the generic tpm driver
+        * for allocation of buffers.
+        */
+       unsigned int max_tx_size;
+       /*
+        * The following is a private structure of the underlying
+        * driver. It is passed as parameter in the send function.
+        */
+       struct tpm_private *tpm_private;
+};
+
+enum vdev_status {
+       TPM_VD_STATUS_DISCONNECTED = 0x0,
+       TPM_VD_STATUS_CONNECTED = 0x1
+};
+
+/* this function is called from tpm_vtpm.c */
+int vtpm_vd_send(struct tpm_chip *tc,
+                 struct tpm_private * tp,
+                 const u8 * buf, size_t count, void *ptr);
+
+/* these functions are offered by tpm_vtpm.c */
+int __init init_vtpm(struct tpm_virtual_device *);
+void __exit cleanup_vtpm(void);
+int vtpm_vd_recv(const unsigned char *buffer, size_t count, const void *ptr);
+void vtpm_vd_status(u8 status);
+
+#endif
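
tpm_vtpm.h above pins down the contract between the generic vTPM glue (tpm_vtpm.c) and whatever lower-level transport sits beneath it: the transport fills in a struct tpm_virtual_device, calls init_vtpm(), provides vtpm_vd_send(), and reports responses and connection state back through vtpm_vd_recv() and vtpm_vd_status(). A hedged sketch of a minimal transport using that interface; my_tvd, my_state, my_transport_init and the size are invented for illustration:

    static struct tpm_private *my_state;        /* transport-private data */

    static struct tpm_virtual_device my_tvd = {
            .max_tx_size = 4096,                /* assumed transfer limit */
    };

    /* Called by tpm_vtpm.c for every command from the generic TPM layer. */
    int vtpm_vd_send(struct tpm_chip *tc, struct tpm_private *tp,
                     const u8 *buf, size_t count, void *ptr)
    {
            /* hand 'buf' to the real transport here; when the reply arrives,
             * deliver it with vtpm_vd_recv(resp, resp_len, ptr) */
            return count;
    }

    static int __init my_transport_init(void)
    {
            my_tvd.tpm_private = my_state;
            return init_vtpm(&my_tvd);
    }
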
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/Makefile        Tue May 02 18:17:59 2006 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-
-obj-$(CONFIG_XEN_TPMDEV_FRONTEND)      += tpmfront.o
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.c      Tue May 02 18:17:59 2006 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,767 +0,0 @@
-/*
- * Copyright (c) 2005, IBM Corporation
- *
- * Author: Stefan Berger, stefanb@xxxxxxxxxx
- * Grant table support: Mahadevan Gomathisankaran
- *
- * This code has been derived from drivers/xen/netfront/netfront.c
- *
- * Copyright (c) 2002-2004, K A Fraser
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- * 
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <xen/tpmfe.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <asm/io.h>
-#include <xen/evtchn.h>
-#include <xen/interface/grant_table.h>
-#include <xen/interface/io/tpmif.h>
-#include <asm/uaccess.h>
-#include <xen/xenbus.h>
-#include <xen/interface/grant_table.h>
-
-#include "tpmfront.h"
-
-#undef DEBUG
-
-/* locally visible variables */
-static grant_ref_t gref_head;
-static struct tpm_private *my_priv;
-
-/* local function prototypes */
-static irqreturn_t tpmif_int(int irq,
-                             void *tpm_priv,
-                             struct pt_regs *ptregs);
-static void tpmif_rx_action(unsigned long unused);
-static int tpmif_connect(struct xenbus_device *dev,
-                         struct tpm_private *tp,
-                         domid_t domid);
-static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
-static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
-static void tpmif_free_tx_buffers(struct tpm_private *tp);
-static void tpmif_set_connected_state(struct tpm_private *tp,
-                                      u8 newstate);
-static int tpm_xmit(struct tpm_private *tp,
-                    const u8 * buf, size_t count, int userbuffer,
-                    void *remember);
-static void destroy_tpmring(struct tpm_private *tp);
-
-#define DPRINTK(fmt, args...) \
-    pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
-#define IPRINTK(fmt, args...) \
-    printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
-#define WPRINTK(fmt, args...) \
-    printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
-
-#define GRANT_INVALID_REF      0
-
-
-static inline int
-tx_buffer_copy(struct tx_buffer *txb, const u8 * src, int len,
-               int isuserbuffer)
-{
-       int copied = len;
-
-       if (len > txb->size) {
-               copied = txb->size;
-       }
-       if (isuserbuffer) {
-               if (copy_from_user(txb->data, src, copied))
-                       return -EFAULT;
-       } else {
-               memcpy(txb->data, src, copied);
-       }
-       txb->len = len;
-       return copied;
-}
-
-static inline struct tx_buffer *tx_buffer_alloc(void)
-{
-       struct tx_buffer *txb = kzalloc(sizeof (struct tx_buffer),
-                                       GFP_KERNEL);
-
-       if (txb) {
-               txb->len = 0;
-               txb->size = PAGE_SIZE;
-               txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
-               if (txb->data == NULL) {
-                       kfree(txb);
-                       txb = NULL;
-               }
-       }
-       return txb;
-}
-
-
-static inline void tx_buffer_free(struct tx_buffer *txb)
-{
-       if (txb) {
-               free_page((long)txb->data);
-               kfree(txb);
-       }
-}
-
-/**************************************************************
- Utility function for the tpm_private structure
-**************************************************************/
-static inline void tpm_private_init(struct tpm_private *tp)
-{
-       spin_lock_init(&tp->tx_lock);
-       init_waitqueue_head(&tp->wait_q);
-}
-
-static inline void tpm_private_free(void)
-{
-       tpmif_free_tx_buffers(my_priv);
-       kfree(my_priv);
-       my_priv = NULL;
-}
-
-static struct tpm_private *tpm_private_get(void)
-{
-       int err;
-       if (!my_priv) {
-               my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
-               if (my_priv) {
-                       tpm_private_init(my_priv);
-                       err = tpmif_allocate_tx_buffers(my_priv);
-                       if (err < 0) {
-                               tpm_private_free();
-                       }
-               }
-       }
-       return my_priv;
-}
-
-/**************************************************************
-
- The interface to let the tpm plugin register its callback
- function and send data to another partition using this module
-
-**************************************************************/
-
-static DEFINE_MUTEX(upperlayer_lock);
-static DEFINE_MUTEX(suspend_lock);
-static struct tpmfe_device *upperlayer_tpmfe;
-
-/*
- * Send data via this module by calling this function
- */
-int tpm_fe_send(struct tpm_private *tp, const u8 * buf, size_t count, void *ptr)
-{
-       int sent;
-
-       mutex_lock(&suspend_lock);
-       sent = tpm_xmit(tp, buf, count, 0, ptr);
-       mutex_unlock(&suspend_lock);
-
-       return sent;
-}
-EXPORT_SYMBOL(tpm_fe_send);
-
-/*
- * Register a callback for receiving data from this module
- */
-int tpm_fe_register_receiver(struct tpmfe_device *tpmfe_dev)
-{
-       int rc = 0;
-
-       mutex_lock(&upperlayer_lock);
-       if (NULL == upperlayer_tpmfe) {
-               upperlayer_tpmfe = tpmfe_dev;
-               tpmfe_dev->max_tx_size = TPMIF_TX_RING_SIZE * PAGE_SIZE;
-               tpmfe_dev->tpm_private = tpm_private_get();
-               if (!tpmfe_dev->tpm_private) {
-                       rc = -ENOMEM;
-               }
-       } else {
-               rc = -EBUSY;
-       }
-       mutex_unlock(&upperlayer_lock);
-       return rc;
-}
-EXPORT_SYMBOL(tpm_fe_register_receiver);
-
-/*
- * Unregister the callback for receiving data from this module
- */
-void tpm_fe_unregister_receiver(void)
-{
-       mutex_lock(&upperlayer_lock);
-       upperlayer_tpmfe = NULL;
-       mutex_unlock(&upperlayer_lock);
-}
-EXPORT_SYMBOL(tpm_fe_unregister_receiver);
-
-/*
- * Call this function to send data to the upper layer's
- * registered receiver function.
- */
-static int tpm_fe_send_upperlayer(const u8 * buf, size_t count,
-                                  const void *ptr)
-{
-       int rc = 0;
-
-       mutex_lock(&upperlayer_lock);
-
-       if (upperlayer_tpmfe && upperlayer_tpmfe->receive)
-               rc = upperlayer_tpmfe->receive(buf, count, ptr);
-
-       mutex_unlock(&upperlayer_lock);
-       return rc;
-}
-
-/**************************************************************
- XENBUS support code
-**************************************************************/
-
-static int setup_tpmring(struct xenbus_device *dev,
-                         struct tpm_private *tp)
-{
-       tpmif_tx_interface_t *sring;
-       int err;
-
-       tp->ring_ref = GRANT_INVALID_REF;
-
-       sring = (void *)__get_free_page(GFP_KERNEL);
-       if (!sring) {
-               xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
-               return -ENOMEM;
-       }
-       tp->tx = sring;
-
-       err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
-       if (err < 0) {
-               free_page((unsigned long)sring);
-               tp->tx = NULL;
-               xenbus_dev_fatal(dev, err, "allocating grant reference");
-               goto fail;
-       }
-       tp->ring_ref = err;
-
-       err = tpmif_connect(dev, tp, dev->otherend_id);
-       if (err)
-               goto fail;
-
-       return 0;
-fail:
-       destroy_tpmring(tp);
-       return err;
-}
-
-
-static void destroy_tpmring(struct tpm_private *tp)
-{
-       tpmif_set_connected_state(tp, 0);
-
-       if (tp->ring_ref != GRANT_INVALID_REF) {
-               gnttab_end_foreign_access(tp->ring_ref, 0,
-                                         (unsigned long)tp->tx);
-               tp->ring_ref = GRANT_INVALID_REF;
-               tp->tx = NULL;
-       }
-
-       if (tp->irq)
-               unbind_from_irqhandler(tp->irq, tp);
-
-       tp->evtchn = tp->irq = 0;
-}
-
-
-static int talk_to_backend(struct xenbus_device *dev,
-                           struct tpm_private *tp)
-{
-       const char *message = NULL;
-       int err;
-       xenbus_transaction_t xbt;
-
-       err = setup_tpmring(dev, tp);
-       if (err) {
-               xenbus_dev_fatal(dev, err, "setting up ring");
-               goto out;
-       }
-
-again:
-       err = xenbus_transaction_start(&xbt);
-       if (err) {
-               xenbus_dev_fatal(dev, err, "starting transaction");
-               goto destroy_tpmring;
-       }
-
-       err = xenbus_printf(xbt, dev->nodename,
-                           "ring-ref","%u", tp->ring_ref);
-       if (err) {
-               message = "writing ring-ref";
-               goto abort_transaction;
-       }
-
-       err = xenbus_printf(xbt, dev->nodename,
-                           "event-channel", "%u", tp->evtchn);
-       if (err) {
-               message = "writing event-channel";
-               goto abort_transaction;
-       }
-
-       err = xenbus_transaction_end(xbt, 0);
-       if (err == -EAGAIN)
-               goto again;
-       if (err) {
-               xenbus_dev_fatal(dev, err, "completing transaction");
-               goto destroy_tpmring;
-       }
-
-       xenbus_switch_state(dev, XenbusStateConnected);
-
-       return 0;
-
-abort_transaction:
-       xenbus_transaction_end(xbt, 1);
-       if (message)
-               xenbus_dev_error(dev, err, "%s", message);
-destroy_tpmring:
-       destroy_tpmring(tp);
-out:
-       return err;
-}
-
-/**
- * Callback received when the backend's state changes.
- */
-static void backend_changed(struct xenbus_device *dev,
-                           XenbusState backend_state)
-{
-       struct tpm_private *tp = dev->data;
-       DPRINTK("\n");
-
-       switch (backend_state) {
-       case XenbusStateInitialising:
-       case XenbusStateInitWait:
-       case XenbusStateInitialised:
-       case XenbusStateUnknown:
-               break;
-
-       case XenbusStateConnected:
-               tpmif_set_connected_state(tp, 1);
-               break;
-
-       case XenbusStateClosing:
-               tpmif_set_connected_state(tp, 0);
-               break;
-
-       case XenbusStateClosed:
-               if (tp->is_suspended == 0) {
-                       device_unregister(&dev->dev);
-               }
-               xenbus_switch_state(dev, XenbusStateClosed);
-               break;
-       }
-}
-
-
-static int tpmfront_probe(struct xenbus_device *dev,
-                          const struct xenbus_device_id *id)
-{
-       int err;
-       int handle;
-       struct tpm_private *tp = tpm_private_get();
-
-       if (!tp)
-               return -ENOMEM;
-
-       err = xenbus_scanf(XBT_NULL, dev->nodename,
-                          "handle", "%i", &handle);
-       if (XENBUS_EXIST_ERR(err))
-               return err;
-
-       if (err < 0) {
-               xenbus_dev_fatal(dev,err,"reading virtual-device");
-               return err;
-       }
-
-       tp->dev = dev;
-       dev->data = tp;
-
-       err = talk_to_backend(dev, tp);
-       if (err) {
-               tpm_private_free();
-               dev->data = NULL;
-               return err;
-       }
-       return 0;
-}
-
-
-static int tpmfront_remove(struct xenbus_device *dev)
-{
-       struct tpm_private *tp = (struct tpm_private *)dev->data;
-       destroy_tpmring(tp);
-       return 0;
-}
-
-static int tpmfront_suspend(struct xenbus_device *dev)
-{
-       struct tpm_private *tp = (struct tpm_private *)dev->data;
-       u32 ctr;
-
-       /* lock, so no app can send */
-       mutex_lock(&suspend_lock);
-       xenbus_switch_state(dev, XenbusStateClosed);
-       tp->is_suspended = 1;
-
-       for (ctr = 0; atomic_read(&tp->tx_busy) && ctr <= 25; ctr++) {
-               if ((ctr % 10) == 0)
-                       printk("TPM-FE [INFO]: Waiting for outstanding request.\n");
-               /*
-                * Wait for a request to be responded to.
-                */
-               interruptible_sleep_on_timeout(&tp->wait_q, 100);
-       }
-
-       if (atomic_read(&tp->tx_busy)) {
-               /*
-                * A temporary work-around.
-                */
-               printk("TPM-FE [WARNING]: Resetting busy flag.");
-               atomic_set(&tp->tx_busy, 0);
-       }
-
-       return 0;
-}
-
-static int tpmfront_resume(struct xenbus_device *dev)
-{
-       struct tpm_private *tp = (struct tpm_private *)dev->data;
-       destroy_tpmring(tp);
-       return talk_to_backend(dev, tp);
-}
-
-static int tpmif_connect(struct xenbus_device *dev,
-                         struct tpm_private *tp,
-                         domid_t domid)
-{
-       int err;
-
-       tp->backend_id = domid;
-
-       err = xenbus_alloc_evtchn(dev, &tp->evtchn);
-       if (err)
-               return err;
-
-       err = bind_evtchn_to_irqhandler(tp->evtchn,
-                                       tpmif_int, SA_SAMPLE_RANDOM, "tpmif",
-                                       tp);
-       if (err <= 0) {
-               WPRINTK("bind_evtchn_to_irqhandler failed (err=%d)\n", err);
-               return err;
-       }
-
-       tp->irq = err;
-       return 0;
-}
-
-static struct xenbus_device_id tpmfront_ids[] = {
-       { "vtpm" },
-       { "" }
-};
-
-static struct xenbus_driver tpmfront = {
-       .name = "vtpm",
-       .owner = THIS_MODULE,
-       .ids = tpmfront_ids,
-       .probe = tpmfront_probe,
-       .remove =  tpmfront_remove,
-       .resume = tpmfront_resume,
-       .otherend_changed = backend_changed,
-       .suspend = tpmfront_suspend,
-};
-
-static void __init init_tpm_xenbus(void)
-{
-       xenbus_register_frontend(&tpmfront);
-}
-
-static void __exit exit_tpm_xenbus(void)
-{
-       xenbus_unregister_driver(&tpmfront);
-}
-
-static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
-{
-       unsigned int i;
-
-       for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
-               tp->tx_buffers[i] = tx_buffer_alloc();
-               if (!tp->tx_buffers[i]) {
-                       tpmif_free_tx_buffers(tp);
-                       return -ENOMEM;
-               }
-       }
-       return 0;
-}
-
-static void tpmif_free_tx_buffers(struct tpm_private *tp)
-{
-       unsigned int i;
-
-       for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
-               tx_buffer_free(tp->tx_buffers[i]);
-       }
-}
-
-static void tpmif_rx_action(unsigned long priv)
-{
-       struct tpm_private *tp = (struct tpm_private *)priv;
-
-       int i = 0;
-       unsigned int received;
-       unsigned int offset = 0;
-       u8 *buffer;
-       tpmif_tx_request_t *tx;
-       tx = &tp->tx->ring[i].req;
-
-       received = tx->size;
-
-       buffer = kmalloc(received, GFP_KERNEL);
-       if (NULL == buffer) {
-               goto exit;
-       }
-
-       for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
-               struct tx_buffer *txb = tp->tx_buffers[i];
-               tpmif_tx_request_t *tx;
-               unsigned int tocopy;
-
-               tx = &tp->tx->ring[i].req;
-               tocopy = tx->size;
-               if (tocopy > PAGE_SIZE) {
-                       tocopy = PAGE_SIZE;
-               }
-
-               memcpy(&buffer[offset], txb->data, tocopy);
-
-               gnttab_release_grant_reference(&gref_head, tx->ref);
-
-               offset += tocopy;
-       }
-
-       tpm_fe_send_upperlayer(buffer, received, tp->tx_remember);
-       kfree(buffer);
-
-exit:
-       atomic_set(&tp->tx_busy, 0);
-       wake_up_interruptible(&tp->wait_q);
-}
-
-
-static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
-{
-       struct tpm_private *tp = tpm_priv;
-       unsigned long flags;
-
-       spin_lock_irqsave(&tp->tx_lock, flags);
-       tpmif_rx_tasklet.data = (unsigned long)tp;
-       tasklet_schedule(&tpmif_rx_tasklet);
-       spin_unlock_irqrestore(&tp->tx_lock, flags);
-
-       return IRQ_HANDLED;
-}
-
-
-static int tpm_xmit(struct tpm_private *tp,
-                    const u8 * buf, size_t count, int isuserbuffer,
-                    void *remember)
-{
-       tpmif_tx_request_t *tx;
-       TPMIF_RING_IDX i;
-       unsigned int offset = 0;
-
-       spin_lock_irq(&tp->tx_lock);
-
-       if (unlikely(atomic_read(&tp->tx_busy))) {
-               printk("tpm_xmit: There's an outstanding request/response "
-                      "on the way!\n");
-               spin_unlock_irq(&tp->tx_lock);
-               return -EBUSY;
-       }
-
-       if (tp->is_connected != 1) {
-               spin_unlock_irq(&tp->tx_lock);
-               return -EIO;
-       }
-
-       for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
-               struct tx_buffer *txb = tp->tx_buffers[i];
-               int copied;
-
-               if (NULL == txb) {
-                       DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
-                               "Not transmitting anything!\n", i);
-                       spin_unlock_irq(&tp->tx_lock);
-                       return -EFAULT;
-               }
-               copied = tx_buffer_copy(txb, &buf[offset], count,
-                                       isuserbuffer);
-               if (copied < 0) {
-                       /* An error occurred */
-                       spin_unlock_irq(&tp->tx_lock);
-                       return copied;
-               }
-               count -= copied;
-               offset += copied;
-
-               tx = &tp->tx->ring[i].req;
-
-               tx->addr = virt_to_machine(txb->data);
-               tx->size = txb->len;
-
-               DPRINTK("First 4 characters sent by TPM-FE are 0x%02x 0x%02x 0x%02x 0x%02x\n",
-                       txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
-
-               /* get the granttable reference for this page */
-               tx->ref = gnttab_claim_grant_reference(&gref_head);
-
-               if (-ENOSPC == tx->ref) {
-                       spin_unlock_irq(&tp->tx_lock);
-                       DPRINTK(" Grant table claim reference failed in func:%s line:%d file:%s\n", __FUNCTION__, __LINE__, __FILE__);
-                       return -ENOSPC;
-               }
-               gnttab_grant_foreign_access_ref( tx->ref,
-                                                tp->backend_id,
-                                                (tx->addr >> PAGE_SHIFT),
-                                                0 /*RW*/);
-               wmb();
-       }
-
-       atomic_set(&tp->tx_busy, 1);
-       tp->tx_remember = remember;
-       mb();
-
-       DPRINTK("Notifying backend via event channel %d\n",
-               tp->evtchn);
-
-       notify_remote_via_irq(tp->irq);
-
-       spin_unlock_irq(&tp->tx_lock);
-       return offset;
-}
-
-
-static void tpmif_notify_upperlayer(struct tpm_private *tp)
-{
-       /*
-        * Notify upper layer about the state of the connection
-        * to the BE.
-        */
-       mutex_lock(&upperlayer_lock);
-
-       if (upperlayer_tpmfe != NULL) {
-               if (tp->is_connected) {
-                       upperlayer_tpmfe->status(TPMFE_STATUS_CONNECTED);
-               } else {
-                       upperlayer_tpmfe->status(0);
-               }
-       }
-       mutex_unlock(&upperlayer_lock);
-}
-
-
-static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
-{
-       /*
-        * Don't notify upper layer if we are in suspend mode and
-        * should disconnect - assumption is that we will resume
-        * The mutex keeps apps from sending.
-        */
-       if (is_connected == 0 && tp->is_suspended == 1) {
-               return;
-       }
-
-       /*
-        * Unlock the mutex if we are connected again
-        * after being suspended - now resuming.
-        * This also removes the suspend state.
-        */
-       if (is_connected == 1 && tp->is_suspended == 1) {
-               tp->is_suspended = 0;
-               /* unlock, so apps can resume sending */
-               mutex_unlock(&suspend_lock);
-       }
-
-       if (is_connected != tp->is_connected) {
-               tp->is_connected = is_connected;
-               tpmif_notify_upperlayer(tp);
-       }
-}
-
-
-/* =================================================================
- * Initialization function.
- * =================================================================
- */
-
-static int __init tpmif_init(void)
-{
-       IPRINTK("Initialising the vTPM driver.\n");
-       if ( gnttab_alloc_grant_references ( TPMIF_TX_RING_SIZE,
-                                            &gref_head ) < 0) {
-               return -EFAULT;
-       }
-
-       init_tpm_xenbus();
-
-       return 0;
-}
-
-module_init(tpmif_init);
-
-static void __exit tpmif_exit(void)
-{
-       exit_tpm_xenbus();
-       gnttab_free_grant_references(gref_head);
-}
-
-module_exit(tpmif_exit);
-
-MODULE_LICENSE("Dual BSD/GPL");
-
-/*
- * Local variables:
- *  c-file-style: "linux"
- *  indent-tabs-mode: t
- *  c-indent-level: 8
- *  c-basic-offset: 8
- *  tab-width: 8
- * End:
- */
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h
--- a/linux-2.6-xen-sparse/drivers/xen/tpmfront/tpmfront.h      Tue May 02 18:17:59 2006 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-#ifndef TPM_FRONT_H
-#define TPM_FRONT_H
-
-struct tpm_private {
-       tpmif_tx_interface_t *tx;
-       unsigned int evtchn;
-       unsigned int irq;
-       u8 is_connected;
-       u8 is_suspended;
-
-       spinlock_t tx_lock;
-
-       struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
-
-       atomic_t tx_busy;
-       void *tx_remember;
-       domid_t backend_id;
-       wait_queue_head_t wait_q;
-
-       struct xenbus_device *dev;
-       int ring_ref;
-};
-
-struct tx_buffer {
-       unsigned int size;      // available space in data
-       unsigned int len;       // used space in data
-       unsigned char *data;    // pointer to a page
-};
-
-#endif
-
-/*
- * Local variables:
- *  c-file-style: "linux"
- *  indent-tabs-mode: t
- *  c-indent-level: 8
- *  c-basic-offset: 8
- *  tab-width: 8
- * End:
- */
diff -r 55f73916d319 -r 51484df99be1 linux-2.6-xen-sparse/include/xen/tpmfe.h
--- a/linux-2.6-xen-sparse/include/xen/tpmfe.h  Tue May 02 18:17:59 2006 +0100
+++ /dev/null   Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-#ifndef TPM_FE_H
-#define TPM_FE_H
-
-struct tpm_private;
-
-struct tpmfe_device {
-       /*
-        * Let upper layer receive data from front-end
-        */
-       int (*receive)(const u8 *buffer, size_t count, const void *ptr);
-       /*
-        * Indicate the status of the front-end to the upper
-        * layer.
-        */
-       void (*status)(unsigned int flags);
-
-       /*
-        * This field indicates the maximum size the driver can
-        * transfer in one chunk. It is filled out by the front-end
-        * driver and should be propagated to the generic tpm driver
-        * for allocation of buffers.
-        */
-       unsigned int max_tx_size;
-       /*
-        * The following is a private structure of the underlying
-        * driver. It's expected as first parameter in the send function.
-        */
-       struct tpm_private *tpm_private;
-};
-
-enum {
-       TPMFE_STATUS_DISCONNECTED = 0x0,
-       TPMFE_STATUS_CONNECTED = 0x1
-};
-
-int tpm_fe_send(struct tpm_private * tp, const u8 * buf, size_t count, void *ptr);
-int tpm_fe_register_receiver(struct tpmfe_device *);
-void tpm_fe_unregister_receiver(void);
-
-#endif

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

