[Xen-changelog] Fix netfront receive path for auto_translated_physmap mode.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 3dde684846837f45f83bb254b755bf2632e8cbcf
# Parent  066ac36725f3993967425f4d5114605fab8e96c2
Fix netfront receive path for auto_translated_physmap mode.

Signed-off-by: Steven Smith <sos22@xxxxxxxxx>
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
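
Background on the interface change: in auto_translated_physmap mode the
guest's pseudo-physical-to-machine table is maintained by Xen, so the
frontend cannot create a transfer grant with frame 0 and patch its p2m
up after the transfer completes; the grant entry itself must say which
pfn the incoming frame should appear at. Hence the new pfn argument to
gnttab_grant_foreign_transfer() and gnttab_grant_foreign_transfer_ref().
A minimal caller sketch under the new convention follows; grant_rx_page()
is a hypothetical helper, not part of this changeset, and the netfront
ring plumbing is elided:

    /* Hypothetical caller sketch, not part of this changeset.  Grant
     * one receive page to the backend for transfer.  The pfn tells Xen
     * where in the guest physmap the replacement frame should land,
     * which an auto-translated guest cannot fix up afterwards. */
    static int grant_rx_page(domid_t backend_id, struct page *page)
    {
            int ref = gnttab_grant_foreign_transfer(backend_id,
                                                    page_to_pfn(page));
            if (ref < 0)
                    return ref;     /* -ENOSPC: grant table is full */
            /* Entry is now GTF_accept_transfer for backend_id. */
            return ref;
    }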

diff -r 066ac36725f3 -r 3dde68484683 linux-2.6-xen-sparse/drivers/xen/core/gnttab.c
--- a/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c    Thu Feb 23 13:50:00 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/core/gnttab.c    Thu Feb 23 14:22:12 2006
@@ -222,25 +222,22 @@
 }
 
 int
-gnttab_grant_foreign_transfer(domid_t domid)
+gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
 {
        int ref;
 
        if (unlikely((ref = get_free_entry()) == -1))
                return -ENOSPC;
-
-       shared[ref].frame = 0;
-       shared[ref].domid = domid;
-       wmb();
-       shared[ref].flags = GTF_accept_transfer;
+       gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
 
        return ref;
 }
 
 void
-gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid)
-{
-       shared[ref].frame = 0;
+gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
+                                 unsigned long pfn)
+{
+       shared[ref].frame = pfn;
        shared[ref].domid = domid;
        wmb();
        shared[ref].flags = GTF_accept_transfer;
diff -r 066ac36725f3 -r 3dde68484683 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Feb 23 13:50:00 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Thu Feb 23 14:22:12 2006
@@ -587,25 +587,23 @@
                BUG_ON((signed short)ref < 0);
                np->grant_rx_ref[id] = ref;
                gnttab_grant_foreign_transfer_ref(ref,
-                                                 np->xbdev->otherend_id);
+                                                 np->xbdev->otherend_id,
+                                                 __pa(skb->head) >> PAGE_SHIFT);
                RING_GET_REQUEST(&np->rx, req_prod + i)->gref = ref;
                rx_pfn_array[i] = virt_to_mfn(skb->head);
 
-               /* Remove this page from map before passing back to Xen. */
-               set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
-                                   INVALID_P2M_ENTRY);
-
-               MULTI_update_va_mapping(rx_mcl+i, (unsigned long)skb->head,
-                                       __pte(0), 0);
-       }
-
-       /* After all PTEs have been zapped we blow away stale TLB entries. */
-       rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
-
-       /* Give away a batch of pages. */
-       rx_mcl[i].op = __HYPERVISOR_memory_op;
-       rx_mcl[i].args[0] = XENMEM_decrease_reservation;
-       rx_mcl[i].args[1] = (unsigned long)&reservation;
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       /* Remove this page before passing back to Xen. */
+                       set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
+                                           INVALID_P2M_ENTRY);
+                       MULTI_update_va_mapping(rx_mcl+i,
+                                               (unsigned long)skb->head,
+                                               __pte(0), 0);
+               }
+       }
+
+       /* Tell the balloon driver what is going on. */
+       balloon_update_driver_allowance(i);
 
        reservation.extent_start = rx_pfn_array;
        reservation.nr_extents   = i;
@@ -613,15 +611,27 @@
        reservation.address_bits = 0;
        reservation.domid        = DOMID_SELF;
 
-       /* Tell the ballon driver what is going on. */
-       balloon_update_driver_allowance(i);
-
-       /* Zap PTEs and give away pages in one big multicall. */
-       (void)HYPERVISOR_multicall(rx_mcl, i+1);
-
-       /* Check return status of HYPERVISOR_memory_op(). */
-       if (unlikely(rx_mcl[i].result != i))
-               panic("Unable to reduce memory reservation\n");
+       if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+               /* After all PTEs have been zapped, flush the TLB. */
+               rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
+                       UVMF_TLB_FLUSH|UVMF_ALL;
+
+               /* Give away a batch of pages. */
+               rx_mcl[i].op = __HYPERVISOR_memory_op;
+               rx_mcl[i].args[0] = XENMEM_decrease_reservation;
+               rx_mcl[i].args[1] = (unsigned long)&reservation;
+
+               /* Zap PTEs and give away pages in one big multicall. */
+               (void)HYPERVISOR_multicall(rx_mcl, i+1);
+
+               /* Check return status of HYPERVISOR_memory_op(). */
+               if (unlikely(rx_mcl[i].result != i))
+                       panic("Unable to reduce memory reservation\n");
+       } else {
+               if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+                                        &reservation) != i)
+                       panic("Unable to reduce memory reservation\n");
+       }
 
        /* Above is a suitable barrier to ensure backend will see requests. */
        np->rx.req_prod_pvt = req_prod + i;
@@ -802,17 +812,19 @@
                np->stats.rx_packets++;
                np->stats.rx_bytes += rx->status;
 
-               /* Remap the page. */
-               MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
-                                       pfn_pte_ma(mfn, PAGE_KERNEL), 0);
-               mcl++;
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       /* Remap the page. */
+                       MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
+                                               pfn_pte_ma(mfn, PAGE_KERNEL),
+                                               0);
+                       mcl++;
                        mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
                                | MMU_MACHPHYS_UPDATE;
                        mmu->val = __pa(skb->head) >> PAGE_SHIFT;
                        mmu++;
 
-                       set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT, mfn);
+                       set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
+                                           mfn);
                }
 
                __skb_queue_tail(&rxq, skb);
@@ -1003,7 +1015,8 @@
                if ((unsigned long)np->rx_skbs[i] < __PAGE_OFFSET)
                        continue;
                gnttab_grant_foreign_transfer_ref(
-                       np->grant_rx_ref[i], np->xbdev->otherend_id);
+                       np->grant_rx_ref[i], np->xbdev->otherend_id,
+                       __pa(np->rx_skbs[i]->data) >> PAGE_SHIFT);
                RING_GET_REQUEST(&np->rx, requeue_idx)->gref =
                        np->grant_rx_ref[i];
                RING_GET_REQUEST(&np->rx, requeue_idx)->id = i;
diff -r 066ac36725f3 -r 3dde68484683 linux-2.6-xen-sparse/include/xen/gnttab.h
--- a/linux-2.6-xen-sparse/include/xen/gnttab.h Thu Feb 23 13:50:00 2006
+++ b/linux-2.6-xen-sparse/include/xen/gnttab.h Thu Feb 23 14:22:12 2006
@@ -71,7 +71,7 @@
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page);
 
-int gnttab_grant_foreign_transfer(domid_t domid);
+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
 
 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
@@ -98,7 +98,8 @@
 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly);
 
-void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid);
+void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
+                                      unsigned long pfn);
 
 #ifdef __ia64__
 #define gnttab_map_vaddr(map) __va(map.dev_bus_addr)
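
For reference, the give-away path in network_alloc_rx_buffers() now
splits on the same feature test. A condensed sketch of the two branches
after this patch, simplified from the netfront.c hunk above with the
declarations and the PTE-zapping loop elided:

    if (!xen_feature(XENFEAT_auto_translated_physmap)) {
            /* PV guest: the queued PTE zaps, a trailing TLB flush and
             * the XENMEM_decrease_reservation all go out in one
             * batched multicall. */
            rx_mcl[i].op      = __HYPERVISOR_memory_op;
            rx_mcl[i].args[0] = XENMEM_decrease_reservation;
            rx_mcl[i].args[1] = (unsigned long)&reservation;
            (void)HYPERVISOR_multicall(rx_mcl, i + 1);
            if (unlikely(rx_mcl[i].result != i))
                    panic("Unable to reduce memory reservation\n");
    } else {
            /* Auto-translated guest: there are no PTEs or p2m entries
             * to fix up, so one direct hypercall suffices. */
            if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
                                     &reservation) != i)
                    panic("Unable to reduce memory reservation\n");
    }

Either way the backend later transfers a replacement frame through each
grant entry; the pfn recorded in the entry is what lets Xen place that
frame at the right spot in an auto-translated guest's physmap.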
