
[Xen-changelog] [xen-unstable] Merge.



# HG changeset patch
# User ssmith@xxxxxxxxxxxxxxxxxxxxxxxxxx
# Node ID 45746c770018bc8ab54b39d5798bfd47eeb73cbc
# Parent  3f568dd6bda6ee77b5343c855f896b6f94a105da
# Parent  65a41e3206ac2e8347a9649d2ffa3d252e9815d2
Merge.
---
 docs/src/user.tex                       |    2 -
 tools/examples/vtpm-common.sh           |    5 +++
 tools/examples/vtpm-impl                |    3 ++
 tools/python/xen/xend/XendDomainInfo.py |   37 ++++++++++++++++----------
 tools/python/xen/xend/image.py          |   45 ++++++++++++++++++++++++--------
 5 files changed, 67 insertions(+), 25 deletions(-)

diff -r 3f568dd6bda6 -r 45746c770018 docs/src/user.tex
--- a/docs/src/user.tex Tue Sep 05 14:27:05 2006 +0100
+++ b/docs/src/user.tex Tue Sep 05 14:28:19 2006 +0100
@@ -1661,7 +1661,7 @@ filled in and consume more space up to t
 filled in and consume more space up to the original 2GB.
 
 {\em{Note:}} Users that have worked with file-backed VBDs on Xen in previous
-versions will be interested to know that this support is not provided through
+versions will be interested to know that this support is now provided through
 the blktap driver instead of the loopback driver.  This change results in
 file-based block devices that are higher-performance, more scalable, and which
 provide better safety properties for VBD data.  All that is required to update
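
The update the paragraph alludes to amounts to changing the disk prefix in
the domain configuration file from the loopback-based 'file:' to the
blktap-based 'tap:aio:'.  A minimal illustrative fragment (the image path
and device name are hypothetical):

    # Loopback-backed file VBD (older configurations):
    #disk = [ 'file:/var/images/guest.img,hda1,w' ]

    # Blktap-backed file VBD:
    disk = [ 'tap:aio:/var/images/guest.img,hda1,w' ]
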
diff -r 3f568dd6bda6 -r 45746c770018 tools/examples/vtpm-common.sh
--- a/tools/examples/vtpm-common.sh     Tue Sep 05 14:27:05 2006 +0100
+++ b/tools/examples/vtpm-common.sh     Tue Sep 05 14:28:19 2006 +0100
@@ -47,6 +47,9 @@ else
        }
        function vtpm_migrate() {
                echo "Error: vTPM migration accross machines not implemented."
+       }
+       function vtpm_migrate_local() {
+               echo "Error: local vTPM migration not supported"
        }
        function vtpm_migrate_recover() {
                true
@@ -353,6 +356,8 @@ function vtpm_migration_step() {
        local res=$(vtpm_isLocalAddress $1)
        if [ "$res" == "0" ]; then
                vtpm_migrate $1 $2 $3
+       else
+               vtpm_migrate_local
        fi
 }
 
diff -r 3f568dd6bda6 -r 45746c770018 tools/examples/vtpm-impl
--- a/tools/examples/vtpm-impl  Tue Sep 05 14:27:05 2006 +0100
+++ b/tools/examples/vtpm-impl  Tue Sep 05 14:28:19 2006 +0100
@@ -184,3 +184,6 @@ function vtpm_migrate_recover() {
  echo "Error: Recovery not supported yet" 
 }
 
+function vtpm_migrate_local() {
+ echo "Error: local vTPM migration not supported"
+}
diff -r 3f568dd6bda6 -r 45746c770018 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Tue Sep 05 14:27:05 2006 +0100
+++ b/tools/python/xen/xend/XendDomainInfo.py   Tue Sep 05 14:28:19 2006 +0100
@@ -1285,28 +1285,37 @@ class XendDomainInfo:
                 for v in range(0, self.info['max_vcpu_id']+1):
                     xc.vcpu_setaffinity(self.domid, v, self.info['cpus'])
 
+            # Use architecture- and image-specific calculations to determine
+            # the various headrooms necessary, given the raw configured
+            # values.
+            # reservation, maxmem, memory, and shadow are all in KiB.
+            reservation = self.image.getRequiredInitialReservation(
+                self.info['memory'] * 1024)
+            maxmem = self.image.getRequiredAvailableMemory(
+                self.info['maxmem'] * 1024)
+            memory = self.image.getRequiredAvailableMemory(
+                self.info['memory'] * 1024)
+            shadow = self.image.getRequiredShadowMemory(
+                self.info['shadow_memory'] * 1024,
+                self.info['maxmem'] * 1024)
+
+            # Round shadow up to a multiple of a MiB, as shadow_mem_control
+            # takes MiB and we must not round down and end up under-providing.
+            shadow = ((shadow + 1023) / 1024) * 1024
+
             # set memory limit
-            maxmem = self.image.getRequiredMemory(self.info['maxmem'] * 1024)
             xc.domain_setmaxmem(self.domid, maxmem)
 
-            mem_kb = self.image.getRequiredMemory(self.info['memory'] * 1024)
-
-            # get the domain's shadow memory requirement
-            shadow_kb = self.image.getRequiredShadowMemory(mem_kb)
-            shadow_kb_req = self.info['shadow_memory'] * 1024
-            if shadow_kb_req > shadow_kb:
-                shadow_kb = shadow_kb_req
-            shadow_mb = (shadow_kb + 1023) / 1024
-
             # Make sure there's enough RAM available for the domain
-            balloon.free(mem_kb + shadow_mb * 1024)
+            balloon.free(memory + shadow)
 
             # Set up the shadow memory
-            shadow_cur = xc.shadow_mem_control(self.domid, shadow_mb)
+            shadow_cur = xc.shadow_mem_control(self.domid, shadow / 1024)
             self.info['shadow_memory'] = shadow_cur
 
-            # initial memory allocation
-            xc.domain_memory_increase_reservation(self.domid, mem_kb, 0, 0)
+            # initial memory reservation
+            xc.domain_memory_increase_reservation(self.domid, reservation, 0,
+                                                  0)
 
             self.createChannels()
 
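The net effect of the rewritten block is easiest to see with concrete
numbers.  Below is a minimal sketch of the MiB round-up and the order of
the hypervisor calls, using hypothetical values in place of self.info
(everything in KiB, integer division written explicitly as //):

    memory_kb = 256 * 1024   # hypothetical configured memory: 256 MiB
    maxmem_kb = 512 * 1024   # hypothetical configured maxmem: 512 MiB
    shadow_kb = 4500         # hypothetical raw figure from getRequiredShadowMemory
    reservation_kb = memory_kb   # plus any image-specific headroom (see image.py)

    # xc.shadow_mem_control() takes MiB, so round up to a whole MiB rather
    # than down, otherwise the domain would be under-provided.
    shadow_kb = ((shadow_kb + 1023) // 1024) * 1024
    assert shadow_kb == 5120     # 4500 KiB rounds up to 5 MiB

    # Order of operations as in the patch:
    #   xc.domain_setmaxmem(domid, maxmem_kb)
    #   balloon.free(memory_kb + shadow_kb)               # make room in dom0
    #   xc.shadow_mem_control(domid, shadow_kb // 1024)   # argument in MiB
    #   xc.domain_memory_increase_reservation(domid, reservation_kb, 0, 0)
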
diff -r 3f568dd6bda6 -r 45746c770018 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Tue Sep 05 14:27:05 2006 +0100
+++ b/tools/python/xen/xend/image.py    Tue Sep 05 14:28:19 2006 +0100
@@ -143,12 +143,27 @@ class ImageHandler:
             raise VmError('Building domain failed: ostype=%s dom=%d err=%s'
                           % (self.ostype, self.vm.getDomid(), str(result)))
 
-    def getRequiredMemory(self, mem_kb):
+    def getRequiredAvailableMemory(self, mem_kb):
+        """@param mem_kb The configured maxmem or memory, in KiB.
+        @return The corresponding required amount of memory for the domain,
+        also in KiB.  This is normally the given mem_kb, but architecture- or
+        image-specific code may override this to add headroom where
+        necessary."""
         return mem_kb
 
-    def getRequiredShadowMemory(self, mem_kb):
-        """@return The minimum shadow memory required, in KiB, for a domain 
-        with mem_kb KiB of RAM."""
+    def getRequiredInitialReservation(self, mem_kb):
+        """@param mem_kb The configured memory, in KiB.
+        @return The corresponding required amount of memory to be free, also
+        in KiB. This is normally the same as getRequiredAvailableMemory, but
+        architecture- or image-specific code may override this to
+        add headroom where necessary."""
+        return self.getRequiredAvailableMemory(mem_kb)
+
+    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
+        """@param shadow_mem_kb The configured shadow memory, in KiB.
+        @param maxmem_kb The configured maxmem, in KiB.
+        @return The corresponding required amount of shadow memory, also in
+        KiB."""
         # PV domains don't need any shadow memory
         return 0
 
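The division of labour between the three hooks is clearest in a
stripped-down sketch; the subclass and its figures below are illustrative
only, not part of the patch:

    class ImageHandler(object):
        # Defaults suit plain PV guests: no headroom, no shadow memory.
        def getRequiredAvailableMemory(self, mem_kb):
            return mem_kb

        def getRequiredInitialReservation(self, mem_kb):
            return self.getRequiredAvailableMemory(mem_kb)

        def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
            return 0

    class ExampleHVMHandler(ImageHandler):
        # A guest type needing headroom overrides only the hooks it cares about.
        def getRequiredAvailableMemory(self, mem_kb):
            return mem_kb + 2048    # hypothetical 2 MiB of fixed overhead

        def getRequiredInitialReservation(self, mem_kb):
            return self.getRequiredAvailableMemory(mem_kb) + 8192  # start-of-day extra
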
@@ -418,7 +433,7 @@ class IA64_HVM_ImageHandler(HVMImageHand
 
     ostype = "hvm"
 
-    def getRequiredMemory(self, mem_kb):
+    def getRequiredAvailableMemory(self, mem_kb):
         page_kb = 16
         # ROM size for guest firmware, ioreq page and xenstore page
         extra_pages = 1024 + 2
@@ -432,19 +447,29 @@ class X86_HVM_ImageHandler(HVMImageHandl
 
     ostype = "hvm"
 
-    def getRequiredMemory(self, mem_kb):
+    def getRequiredAvailableMemory(self, mem_kb):
         page_kb = 4
         # This was derived empirically:
-        #   2.4 MB overhead per 1024 MB RAM + 8 MB constant
+        #   2.4 MB overhead per 1024 MB RAM
         #   + 4 to avoid low-memory condition
-        extra_mb = (2.4/1024) * (mem_kb/1024.0) + 12;
+        extra_mb = (2.4/1024) * (mem_kb/1024.0) + 4
         extra_pages = int( math.ceil( extra_mb*1024 / page_kb ))
         return mem_kb + extra_pages * page_kb
 
-    def getRequiredShadowMemory(self, mem_kb):
+    def getRequiredInitialReservation(self, mem_kb):
+        # Add 8 MiB overhead for QEMU's video RAM.
+        return self.getRequiredAvailableMemory(mem_kb) + 8192
+
+    def getRequiredShadowMemory(self, shadow_mem_kb, maxmem_kb):
+        # The given value is the configured value -- we need to include the
+        # overhead due to getRequiredAvailableMemory.
+        maxmem_kb = self.getRequiredAvailableMemory(maxmem_kb)
+
         # 1MB per vcpu plus 4Kib/Mib of RAM.  This is higher than 
         # the minimum that Xen would allocate if no value were given.
-        return 1024 * self.vm.getVCpuCount() + mem_kb / 256
+        return max(1024 * self.vm.getVCpuCount() + maxmem_kb / 256,
+                   shadow_mem_kb)
+
 
 _handlers = {
     "powerpc": {

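As a worked example of the x86 HVM arithmetic above, take a hypothetical
guest with memory = maxmem = 1024 MiB and two virtual CPUs (the figures
are illustrative only):

    import math

    mem_kb = maxmem_kb = 1024 * 1024   # 1 GiB, hypothetical
    vcpus = 2
    page_kb = 4

    # getRequiredAvailableMemory: ~2.4 MiB per GiB of RAM plus 4 MiB of slack.
    extra_mb = (2.4 / 1024) * (mem_kb / 1024.0) + 4           # ~6.4 MiB
    extra_pages = int(math.ceil(extra_mb * 1024 / page_kb))   # 1639 pages
    available_kb = mem_kb + extra_pages * page_kb             # 1055132 KiB

    # getRequiredInitialReservation: a further 8 MiB for QEMU's video RAM.
    reservation_kb = available_kb + 8192                      # 1063324 KiB

    # getRequiredShadowMemory: 1 MiB per VCPU plus 4 KiB per MiB of adjusted
    # RAM, or the configured shadow value if larger (assume none configured).
    shadow_kb = max(1024 * vcpus + available_kb // 256, 0)    # 6169 KiB

XendDomainInfo.py then rounds the 6169 KiB up to the next whole MiB, so
xc.shadow_mem_control() is called with 7 MiB for this guest.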