[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v3 0/3] map grant refs at pfn = mfn



Thomas, can you give this patch a quick try? I have only compile-tested it
because I don't have a handy testing environment at the moment.

Apply it to Dom0 kernel.

---8<---
From b32aa53ba8b79f0006780549953510930513a3ac Mon Sep 17 00:00:00 2001
From: Wei Liu <wei.liu2@xxxxxxxxxx>
Date: Wed, 6 Aug 2014 16:50:06 +0100
Subject: [PATCH] xen-netback: don't stop kthreads until all in-flight packets
 are processed

Reference count the number of packets in the host network stack, so that we
don't stop the deallocation thread too early. If we do, we can end up with
xenvif_free waiting forever for the deallocation thread to unmap grefs.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 drivers/net/xen-netback/common.h    |    5 +++++
 drivers/net/xen-netback/interface.c |   16 ++++++++++++++++
 drivers/net/xen-netback/netback.c   |    2 ++
 3 files changed, 23 insertions(+)

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 2532ce8..e2b5734 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -164,6 +164,8 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        u16 dealloc_ring[MAX_PENDING_REQS];
        struct task_struct *dealloc_task;
        wait_queue_head_t dealloc_wq;
+       atomic_t inflight_packets; /* number of packets in host network stack */
+       wait_queue_head_t inflight_wq;
 
        /* Use kthread for guest RX */
        struct task_struct *task;
@@ -297,6 +299,9 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
+void xenvif_inc_inflight_packets(struct xenvif_queue *queue);
+void xenvif_dec_inflight_packets(struct xenvif_queue *queue);
+
 extern bool separate_tx_rx_irq;
 
 extern unsigned int rx_drain_timeout_msecs;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 91dad80..84aa863 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,17 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+void xenvif_inc_inflight_packets(struct xenvif_queue *queue)
+{
+       atomic_inc(&queue->inflight_packets);
+}
+
+void xenvif_dec_inflight_packets(struct xenvif_queue *queue)
+{
+       if (atomic_dec_and_test(&queue->inflight_packets))
+               wake_up(&queue->inflight_wq);
+}
+
 static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 {
        struct net_device *dev = queue->vif->dev;
@@ -548,6 +559,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 
        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
+       init_waitqueue_head(&queue->inflight_wq);
+       atomic_set(&queue->inflight_packets, 0);
 
        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
@@ -662,6 +675,9 @@ void xenvif_disconnect(struct xenvif *vif)
        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
 
+               wait_event(queue->inflight_wq,
+                          atomic_read(&queue->inflight_packets) == 0);
+
                if (queue->task) {
                        del_timer_sync(&queue->wake_queue);
                        kthread_stop(queue->task);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index c65b636..59c90619 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1645,6 +1645,7 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
                        queue->stats.tx_zerocopy_sent++;
                }
 
+               xenvif_inc_inflight_packets(queue);
                netif_receive_skb(skb);
        }
 
@@ -1681,6 +1682,7 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
                queue->stats.tx_zerocopy_success++;
        else
                queue->stats.tx_zerocopy_fail++;
+       xenvif_dec_inflight_packets(queue);
 }
 
 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
-- 
1.7.10.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.