
[PATCH 10/17] x86/shadow: use get_unsafe() instead of copy_from_unsafe()



get_unsafe() is the slightly more direct way of reading a single entry,
avoiding the separate size argument, and is better in line with
shadow_write_entries()'s use of put_unsafe().

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
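
For illustration only (not part of the patch), a minimal sketch of the
idiom difference, assuming the usual uaccess convention of both helpers
returning 0 on success; "src" is merely a placeholder for the linear
table address being read:

    shadow_l2e_t sl2e;

    /* Before: take the destination's address and pass its size explicitly. */
    if ( copy_from_unsafe(&sl2e, src, sizeof(sl2e)) != 0 )
        /* fault */;

    /* After: name the destination directly; no separate size argument. */
    if ( get_unsafe(sl2e, src) != 0 )
        /* fault */;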

--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2614,10 +2614,9 @@ static int sh_page_fault(struct vcpu *v,
         {
             shadow_l2e_t sl2e;
             mfn_t gl1mfn;
-            if ( (copy_from_unsafe(&sl2e,
-                                   (sh_linear_l2_table(v)
-                                    + shadow_l2_linear_offset(va)),
-                                   sizeof(sl2e)) != 0)
+            if ( (get_unsafe(sl2e,
+                             (sh_linear_l2_table(v) +
+                              shadow_l2_linear_offset(va))) != 0)
                  || !(shadow_l2e_get_flags(sl2e) & _PAGE_PRESENT)
                  || !mfn_valid(gl1mfn = backpointer(mfn_to_page(
                                   shadow_l2e_get_mfn(sl2e))))
@@ -2633,10 +2632,9 @@ static int sh_page_fault(struct vcpu *v,
 #endif /* SHOPT_OUT_OF_SYNC */
         /* The only reasons for reserved bits to be set in shadow entries
          * are the two "magic" shadow_l1e entries. */
-        if ( likely((copy_from_unsafe(&sl1e,
-                                      (sh_linear_l1_table(v)
-                                       + shadow_l1_linear_offset(va)),
-                                      sizeof(sl1e)) == 0)
+        if ( likely((get_unsafe(sl1e,
+                                (sh_linear_l1_table(v) +
+                                 shadow_l1_linear_offset(va))) == 0)
                     && sh_l1e_is_magic(sl1e)) )
         {
 
@@ -3311,9 +3309,9 @@ static bool sh_invlpg(struct vcpu *v, un
         /* This must still be a copy-from-unsafe because we don't have the
          * paging lock, and the higher-level shadows might disappear
          * under our feet. */
-        if ( copy_from_unsafe(&sl3e, (sh_linear_l3_table(v)
-                                      + shadow_l3_linear_offset(linear)),
-                              sizeof (sl3e)) != 0 )
+        if ( get_unsafe(sl3e,
+                        (sh_linear_l3_table(v) +
+                         shadow_l3_linear_offset(linear))) != 0 )
         {
             perfc_incr(shadow_invlpg_fault);
             return false;
@@ -3332,9 +3330,9 @@ static bool sh_invlpg(struct vcpu *v, un
 
     /* This must still be a copy-from-unsafe because we don't have the shadow
      * lock, and the higher-level shadows might disappear under our feet. */
-    if ( copy_from_unsafe(&sl2e,
-                          sh_linear_l2_table(v) + shadow_l2_linear_offset(linear),
-                          sizeof (sl2e)) != 0 )
+    if ( get_unsafe(sl2e,
+                    (sh_linear_l2_table(v) +
+                     shadow_l2_linear_offset(linear))) != 0 )
     {
         perfc_incr(shadow_invlpg_fault);
         return false;
@@ -3375,10 +3373,9 @@ static bool sh_invlpg(struct vcpu *v, un
              * have the paging lock last time we checked, and the
              * higher-level shadows might have disappeared under our
              * feet. */
-            if ( copy_from_unsafe(&sl2e,
-                                  sh_linear_l2_table(v)
-                                  + shadow_l2_linear_offset(linear),
-                                  sizeof (sl2e)) != 0 )
+            if ( get_unsafe(sl2e,
+                            (sh_linear_l2_table(v) +
+                             shadow_l2_linear_offset(linear))) != 0 )
             {
                 perfc_incr(shadow_invlpg_fault);
                 paging_unlock(d);
