
[Xen-changelog] Solve badness problem when udp_poll() receives fragmented skbuff w/ CONFIG_HIGHMEM



ChangeSet 1.1890, 2005/05/31 00:42:11+01:00, smh22@xxxxxxxxxxxxxxxxxxxx

        Solve badness problem when udp_poll() receives fragmented skbuff w/ CONFIG_HIGHMEM
        Upstream patch (now in -net tree) from Herbert Xu. 
        
        Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
        Signed-off-by: Steven Hand <steven@xxxxxxxxxxxxx>



 udp-frag.patch |   48 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 48 insertions(+)


diff -Nru a/patches/linux-2.6.11/udp-frag.patch b/patches/linux-2.6.11/udp-frag.patch
--- /dev/null   Wed Dec 31 16:00:00 1969
+++ b/patches/linux-2.6.11/udp-frag.patch       2005-05-30 20:03:13 -04:00
@@ -0,0 +1,55 @@
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -738,7 +738,7 @@ int udp_ioctl(struct sock *sk, int cmd, 
+                       unsigned long amount;
+ 
+                       amount = 0;
+-                      spin_lock_irq(&sk->sk_receive_queue.lock);
++                      spin_lock_bh(&sk->sk_receive_queue.lock);
+                       skb = skb_peek(&sk->sk_receive_queue);
+                       if (skb != NULL) {
+                               /*
+@@ -748,7 +748,7 @@ int udp_ioctl(struct sock *sk, int cmd, 
+                                */
+                               amount = skb->len - sizeof(struct udphdr);
+                       }
+-                      spin_unlock_irq(&sk->sk_receive_queue.lock);
++                      spin_unlock_bh(&sk->sk_receive_queue.lock);
+                       return put_user(amount, (int __user *)arg);
+               }
+ 
+@@ -848,12 +848,12 @@ csum_copy_err:
+       /* Clear queue. */
+       if (flags&MSG_PEEK) {
+               int clear = 0;
+-              spin_lock_irq(&sk->sk_receive_queue.lock);
++              spin_lock_bh(&sk->sk_receive_queue.lock);
+               if (skb == skb_peek(&sk->sk_receive_queue)) {
+                       __skb_unlink(skb, &sk->sk_receive_queue);
+                       clear = 1;
+               }
+-              spin_unlock_irq(&sk->sk_receive_queue.lock);
++              spin_unlock_bh(&sk->sk_receive_queue.lock);
+               if (clear)
+                       kfree_skb(skb);
+       }
+@@ -1334,7 +1334,7 @@ unsigned int udp_poll(struct file *file,
+               struct sk_buff_head *rcvq = &sk->sk_receive_queue;
+               struct sk_buff *skb;
+ 
+-              spin_lock_irq(&rcvq->lock);
++              spin_lock_bh(&rcvq->lock);
+               while ((skb = skb_peek(rcvq)) != NULL) {
+                       if (udp_checksum_complete(skb)) {
+                               UDP_INC_STATS_BH(UDP_MIB_INERRORS);
+@@ -1345,7 +1345,7 @@ unsigned int udp_poll(struct file *file,
+                               break;
+                       }
+               }
+-              spin_unlock_irq(&rcvq->lock);
++              spin_unlock_bh(&rcvq->lock);
+ 
+               /* nothing to see, move along */
+               if (skb == NULL)
+
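
Background on the "badness" (a sketch of the mechanism, not text from the
changeset): with CONFIG_HIGHMEM, udp_checksum_complete() on a fragmented
skbuff has to kmap each highmem fragment, and the 2.6-era kunmap_skb_frag()
helper finishes with local_bh_enable(). local_bh_enable() warns when called
with hard interrupts disabled (on i386 the message begins "Badness in
local_bh_enable"), and that is exactly the state spin_lock_irq() establishes.
The receive-queue lock only needs to exclude softirq input processing, which
is never entered from hard-interrupt context, so spin_lock_bh() gives
sufficient exclusion while leaving hard interrupts enabled across the
checksum. A minimal sketch of the resulting udp_poll() loop, assuming the
2.6.11 interfaces visible in the hunk above (udp_poll_sketch() is a made-up
illustrative name, not the patched function):

    /*
     * Illustrative sketch only, assuming the 2.6.11 interfaces used in
     * the hunk above; udp_poll_sketch() is a made-up name, not the
     * kernel's udp_poll().
     */
    static unsigned int udp_poll_sketch(struct sock *sk)
    {
            struct sk_buff_head *rcvq = &sk->sk_receive_queue;
            struct sk_buff *skb;

            /*
             * Bottom halves are disabled, so the softirq that queues
             * datagrams onto rcvq cannot run concurrently, yet hard
             * interrupts stay enabled: the local_bh_enable() performed
             * while unmapping highmem fragments during checksumming is
             * now legal, where under spin_lock_irq() it warned.
             */
            spin_lock_bh(&rcvq->lock);
            while ((skb = skb_peek(rcvq)) != NULL) {
                    if (!udp_checksum_complete(skb))
                            break;                  /* valid datagram found */
                    UDP_INC_STATS_BH(UDP_MIB_INERRORS);
                    __skb_unlink(skb, rcvq);        /* drop corrupt datagram */
                    kfree_skb(skb);
            }
            spin_unlock_bh(&rcvq->lock);

            /* readable only if a datagram with a good checksum remains */
            return skb != NULL ? (POLLIN | POLLRDNORM) : 0;
    }

The udp_ioctl() and csum_copy_err hunks make the same irq-to-bh substitution:
sk_receive_queue.lock is never taken from hard-interrupt context, so disabling
bottom halves is enough exclusion in those critical sections as well.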

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

