
[Xen-changelog] [xen stable-4.5] x86/shadow: account for ioreq server pages before complaining about not found mapping



commit 065b1347b0902fd68291ddc593a0055259793383
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon May 9 13:16:10 2016 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon May 9 13:16:10 2016 +0200

    x86/shadow: account for ioreq server pages before complaining about not found mapping
    
    prepare_ring_for_helper(), just like share_xen_page_with_guest(),
    takes a write reference on the page, and hence should similarly be
    accounted for when determining whether to log a complaint.
    
    This requires using recursive locking for the ioreq server lock, as the
    offending invocation of sh_remove_all_mappings() is down the call stack
    from hvm_set_ioreq_server_state(). (While not strictly needed to be
    done in all other instances too, convert all of them for consistency.)
    
    At once improve the usefulness of the shadow error message: Log all
    values involved in triggering it as well as the GFN (to aid
    understanding which guest page it is that there is a problem with - in
    cases like the one here the GFN is invariant across invocations, while
    the MFN obviously can [and will] vary).
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
    master commit: 77eb5dbeff78bbe549793325520f59ab46a187f8
    master date: 2016-05-02 09:20:17 +0200
---
 xen/arch/x86/hvm/hvm.c          | 68 ++++++++++++++++++++++++++++-------------
 xen/arch/x86/mm/shadow/common.c | 31 ++++++++++++-------
 xen/include/asm-x86/hvm/hvm.h   |  1 +
 xen/include/asm-x86/shadow.h    | 12 --------
 4 files changed, 67 insertions(+), 45 deletions(-)
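
For readers following the shadow-code hunk below: the check that this patch relaxes in sh_remove_all_mappings() tolerates a bounded number of leftover references before logging an error. The following standalone C sketch is purely illustrative and uses simplified inputs; the real code masks page->count_info with PGC_count_mask and page->u.inuse.type_info with PGT_count_mask, and the helper name extra_mappings_are_expected() is invented for this example.

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative sketch only: decide whether leftover references to a guest
 * frame are expected rather than an error. The real check lives in
 * sh_remove_all_mappings() and reads the counts out of struct page_info.
 */
static bool extra_mappings_are_expected(unsigned long untyped_refs,
                                        unsigned long typed_refs,
                                        bool xen_heap_page,
                                        bool ioreq_server_page)
{
    /*
     * Up to three untyped references may legitimately remain (e.g. the qemu
     * helper's mapping of the domain's RAM and the HVM restore program's
     * mapping), plus exactly one typed reference if the page is shared via
     * share_xen_page_with_guest() or mapped via prepare_ring_for_helper().
     */
    return untyped_refs <= 3 &&
           typed_refs == (unsigned long)(xen_heap_page || ioreq_server_page);
}

int main(void)
{
    /* An ioreq server page holding one typed ref is no longer reported. */
    printf("%d\n", extra_mappings_are_expected(2, 1, false, true));
    return 0;
}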

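Why the plain spin_lock() calls below become spin_lock_recursive(): with the new is_ioreq_server_page() check, hvm_set_ioreq_server_state() can end up re-acquiring the ioreq server lock on the same CPU, because sh_remove_all_mappings() sits further down its call stack and now takes that lock as well. The userspace analogue below is only a sketch of that pattern, using a POSIX recursive mutex rather than Xen's spinlock API; the *_demo() function names are invented and mirror the hypervisor ones loosely.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ioreq_server_lock;

/* Loose analogue of is_ioreq_server_page(): re-takes an already-held lock. */
static int is_ioreq_server_page_demo(void)
{
    pthread_mutex_lock(&ioreq_server_lock);   /* nested acquisition */
    int found = 0;                            /* the real code walks the server list */
    pthread_mutex_unlock(&ioreq_server_lock);
    return found;
}

/* Loose analogue of hvm_set_ioreq_server_state() -> ... -> sh_remove_all_mappings(). */
static void set_ioreq_server_state_demo(void)
{
    pthread_mutex_lock(&ioreq_server_lock);   /* outer acquisition */
    is_ioreq_server_page_demo();              /* deadlocks if the lock is not recursive */
    pthread_mutex_unlock(&ioreq_server_lock);
}

int main(void)
{
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    /* Recursive type: the owning thread may re-acquire without self-deadlock. */
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&ioreq_server_lock, &attr);

    set_ioreq_server_state_demo();
    puts("no deadlock");
    return 0;
}

Built with e.g. "cc -pthread", this runs to completion; with an ordinary non-recursive mutex the nested acquisition would typically hang, which is the situation the conversion below avoids inside the hypervisor.
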
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0abc740..d0905e7 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -521,6 +521,30 @@ static int hvm_map_ioreq_page(
     return 0;
 }
 
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
+{
+    const struct hvm_ioreq_server *s;
+    bool_t found = 0;
+
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    list_for_each_entry ( s,
+                          &d->arch.hvm_domain.ioreq_server.list,
+                          list_entry )
+    {
+        if ( (s->ioreq.va && s->ioreq.page == page) ||
+             (s->bufioreq.va && s->bufioreq.page == page) )
+        {
+            found = 1;
+            break;
+        }
+    }
+
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
+
+    return found;
+}
+
 static void hvm_remove_ioreq_gmfn(
     struct domain *d, struct hvm_ioreq_page *iorp)
 {
@@ -1005,7 +1029,7 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
         goto fail1;
 
     domain_pause(d);
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -EEXIST;
     if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
@@ -1028,14 +1052,14 @@ static int hvm_create_ioreq_server(struct domain *d, domid_t domid,
     if ( id )
         *id = s->id;
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     return 0;
 
  fail3:
  fail2:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     domain_unpause(d);
 
     xfree(s);
@@ -1048,7 +1072,7 @@ static int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1077,7 +1101,7 @@ static int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1090,7 +1114,7 @@ static int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1115,7 +1139,7 @@ static int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1126,7 +1150,7 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1166,7 +1190,7 @@ static int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1177,7 +1201,7 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each_entry ( s,
@@ -1217,7 +1241,7 @@ static int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
         }
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1228,7 +1252,7 @@ static int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
     struct list_head *entry;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     rc = -ENOENT;
     list_for_each ( entry,
@@ -1257,7 +1281,7 @@ static int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
         break;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
 
@@ -1266,7 +1290,7 @@ static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
     struct hvm_ioreq_server *s;
     int rc;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
@@ -1279,7 +1303,7 @@ static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
             goto fail;
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return 0;
 
@@ -1289,7 +1313,7 @@ static int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     return rc;
 }
@@ -1298,21 +1322,21 @@ static void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
 {
     struct hvm_ioreq_server *s;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     list_for_each_entry ( s,
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
         hvm_ioreq_server_remove_vcpu(s, v);
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 static void hvm_destroy_all_ioreq_servers(struct domain *d)
 {
     struct hvm_ioreq_server *s, *next;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /* No need to domain_pause() as the domain is being torn down */
 
@@ -1335,7 +1359,7 @@ static void hvm_destroy_all_ioreq_servers(struct domain *d)
         xfree(s);
     }
 
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 }
 
 static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
@@ -1358,7 +1382,7 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
     struct hvm_ioreq_server *s;
     int rc = 0;
 
-    spin_lock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
     /*
      * Lack of ioreq server is not a failure. HVM_PARAM_DM_DOMAIN will
@@ -1407,7 +1431,7 @@ static int hvm_set_dm_domain(struct domain *d, domid_t domid)
     domain_unpause(d);
 
  done:
-    spin_unlock(&d->arch.hvm_domain.ioreq_server.lock);
+    spin_unlock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
     return rc;
 }
 
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 76a56ca..4e9206b 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -2394,7 +2394,8 @@ int sh_remove_write_access_from_sl1p(struct vcpu *v, mfn_t gmfn,
 /* Remove all mappings of a guest frame from the shadow tables.
  * Returns non-zero if we need to flush TLBs. */
 
-int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
+static int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn,
+                                  unsigned long gfn)
 {
     struct page_info *page = mfn_to_page(gmfn);
 
@@ -2446,19 +2447,25 @@ int sh_remove_all_mappings(struct vcpu *v, mfn_t gmfn)
     /* If that didn't catch the mapping, something is very wrong */
     if ( !sh_check_page_has_no_refs(page) )
     {
-        /* Don't complain if we're in HVM and there are some extra mappings: 
+        /*
+         * Don't complain if we're in HVM and there are some extra mappings:
          * The qemu helper process has an untyped mapping of this dom's RAM 
          * and the HVM restore program takes another.
-         * Also allow one typed refcount for xenheap pages, to match
-         * share_xen_page_with_guest(). */
+         * Also allow one typed refcount for
+         * - Xen heap pages, to match share_xen_page_with_guest(),
+         * - ioreq server pages, to match prepare_ring_for_helper().
+         */
         if ( !(shadow_mode_external(v->domain)
                && (page->count_info & PGC_count_mask) <= 3
                && ((page->u.inuse.type_info & PGT_count_mask)
-                   == !!is_xen_heap_page(page))) )
+                   == (is_xen_heap_page(page) ||
+                       is_ioreq_server_page(v->domain, page)))) )
         {
-            SHADOW_ERROR("can't find all mappings of mfn %lx: "
-                          "c=%08lx t=%08lx\n", mfn_x(gmfn), 
-                          page->count_info, page->u.inuse.type_info);
+            SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
+                          "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn,
+                          page->count_info, page->u.inuse.type_info,
+                          !!is_xen_heap_page(page),
+                          is_ioreq_server_page(v->domain, page));
         }
     }
 
@@ -3337,7 +3344,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
         if ( (p2m_is_valid(p2mt) || p2m_is_grant(p2mt)) && mfn_valid(mfn) ) 
         {
             sh_remove_all_shadows_and_parents(v, mfn);
-            if ( sh_remove_all_mappings(v, mfn) )
+            if ( sh_remove_all_mappings(v, mfn, gfn) )
                 flush_tlb_mask(d->domain_dirty_cpumask);
         }
     }
@@ -3372,7 +3379,8 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
                 {
                     /* This GFN->MFN mapping has gone away */
                     sh_remove_all_shadows_and_parents(v, omfn);
-                    if ( sh_remove_all_mappings(v, omfn) )
+                    if ( sh_remove_all_mappings(v, omfn,
+                                                gfn + (i << PAGE_SHIFT)) )
                         cpumask_or(&flushmask, &flushmask,
                                    d->domain_dirty_cpumask);
                 }
@@ -3588,7 +3596,8 @@ int shadow_track_dirty_vram(struct domain *d,
                         dirty = 1;
                         /* TODO: Heuristics for finding the single mapping of
                          * this gmfn */
-                        flush_tlb |= sh_remove_all_mappings(d->vcpu[0], mfn);
+                        flush_tlb |= sh_remove_all_mappings(d->vcpu[0], mfn,
+                                                            begin_pfn + i);
                     }
                     else
                     {
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 6a98267..aba040c 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -361,6 +361,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
 void hvm_migrate_timers(struct vcpu *v);
 bool_t hvm_has_dm(struct domain *d);
 bool_t hvm_io_pending(struct vcpu *v);
+bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page);
 void hvm_do_resume(struct vcpu *v);
 void hvm_migrate_pirqs(struct vcpu *v);
 
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 44dbbf5..86e4470 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -91,18 +91,6 @@ void shadow_clean_dirty_bitmap(struct domain *d);
  * has changed, and when bringing up a VCPU for the first time. */
 void shadow_update_paging_modes(struct vcpu *v);
 
-
-/* Remove all mappings of the guest page from the shadows. 
- * This is called from common code.  It does not flush TLBs. */
-int sh_remove_all_mappings(struct vcpu *v, mfn_t target_mfn);
-static inline void 
-shadow_drop_references(struct domain *d, struct page_info *p)
-{
-    if ( unlikely(shadow_mode_enabled(d)) )
-        /* See the comment about locking in sh_remove_all_mappings */
-        sh_remove_all_mappings(d->vcpu[0], _mfn(page_to_mfn(p)));
-}
-
 /* Remove all shadows of the guest mfn. */
 void sh_remove_shadows(struct vcpu *v, mfn_t gmfn, int fast, int all);
 static inline void shadow_remove_all_shadows(struct vcpu *v, mfn_t gmfn)
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.5
