
[Xen-devel] [PATCH] Fix DMA emulation for ia64.



Hi Ian and Keir,
    In Xen 3.4-RC3 we found a regression when creating HVM domains; the issue is
discussed in the thread
http://www.nabble.com/A-strange-issue-when-VTI-guest-vcpus-number-is-near-pcpus-td23400397.html.
This patch is a key fix for Xen 3.4. Without it, any HVM guest may crash during
the boot stage. Could you help to apply it for the next release?
Thanks!
Xiantao


From 335c33338c5bc315aa2d50b01eeabb7427f4f62c Mon Sep 17 00:00:00 2001
From: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
Date: Fri, 8 May 2009 17:06:24 +0800
Subject: [PATCH] Fix DMA emulation for ia64.
 
For DMA on a native system, the operating system relies on the platform to
keep the icache coherent with memory touched by DMA operations. For the
virtual DMA of a virtual machine, however, the DMA emulation code has to
flush the icache with explicit instructions; otherwise the guest may execute
stale icache contents and crash.
 
Signed-off-by: Xiantao Zhang <xiantao.zhang@xxxxxxxxx>
Signed-off-by: Yang Zhang <yang.zhang@xxxxxxxxx>
---
 cache-utils.h |   12 ++++++++++++
 cutils.c      |    7 +++++++
 dma-helpers.c |   14 ++++++++++++++
 3 files changed, 33 insertions(+), 0 deletions(-)
 
diff --git a/cache-utils.h b/cache-utils.h
index b45fde4..561d251 100644
--- a/cache-utils.h
+++ b/cache-utils.h
@@ -34,8 +34,20 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop)
     asm volatile ("isync" : : : "memory");
 }
 
+#elif defined (__ia64__)
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+    while (start < stop) {
+        asm volatile ("fc %0" :: "r"(start));
+        start += 32;
+    }
+    asm volatile (";;sync.i;;srlz.i;;");
+}
+
+#define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #else
 #define qemu_cache_utils_init(envp) do { (void) (envp); } while (0)
 #endif
 
+
 #endif /* QEMU_CACHE_UTILS_H */
diff --git a/cutils.c b/cutils.c
index 4541214..5137fe1 100644
--- a/cutils.c
+++ b/cutils.c
@@ -23,6 +23,7 @@
  */
 #include "qemu-common.h"
 #include "host-utils.h"
+#include "cache-utils.h"
 
 void pstrcpy(char *buf, size_t buf_size, const char *str)
 {
@@ -157,6 +158,12 @@ void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
         if (copy > qiov->iov[i].iov_len)
             copy = qiov->iov[i].iov_len;
         memcpy(qiov->iov[i].iov_base, p, copy);
+
+#ifdef __ia64__
+        flush_icache_range((unsigned long)qiov->iov[i].iov_base,
+                           (unsigned long)(qiov->iov[i].iov_base + copy));
+#endif
+
         p     += copy;
         count -= copy;
     }
diff --git a/dma-helpers.c b/dma-helpers.c
index b2ade19..0523dc8 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -9,6 +9,7 @@
 
 #include "dma.h"
 #include "block_int.h"
+#include "cache-utils.h"
 
 void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
 {
@@ -135,6 +136,19 @@ static BlockDriverAIOCB *dma_bdrv_io(
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
+
+#ifdef __ia64__
+    if (!is_write) {
+        int i;
+        QEMUIOVector *qiov;
+        qiov = &dbs->iov;
+        for (i = 0; i < qiov->niov; ++i) {
+            flush_icache_range((unsigned long)qiov->iov[i].iov_base,
+                               (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
+        }
+    }
+#endif
+
     return dbs->acb;
 }
 
-- 
1.5.2.4
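
For reference, the flush above follows the usual ia64 fc / sync.i / srlz.i
sequence. Below is a commented, standalone sketch (not part of the patch;
the function name is illustrative only) written under the same assumption the
patch makes, namely a minimum ia64 cache-line size of 32 bytes:

/*
 * ia64_flush_icache_range: illustrative restatement of the flush used above.
 * "fc" flushes the cache line containing the given address; stepping by 32
 * bytes (the assumed minimum line size) covers every line in [start, stop).
 * "sync.i" waits for the flushes to become visible and "srlz.i" serializes
 * the instruction stream so later instruction fetches see the new contents.
 */
#if defined(__ia64__)
static inline void ia64_flush_icache_range(unsigned long start, unsigned long stop)
{
    while (start < stop) {
        asm volatile ("fc %0" :: "r"(start) : "memory");
        start += 32;
    }
    asm volatile (";;sync.i;;srlz.i;;" ::: "memory");
}
#endif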
 
 

Attachment: 0001-Fix-DMA-emualtion-for-ia64.patch
Description: 0001-Fix-DMA-emualtion-for-ia64.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 

