[RFC net-next io_uring 04/11] net: shrink struct ubuf_info



We can benefit from a smaller struct ubuf_info, so leave only mandatory
fields and let users decide how they want to extend it. Convert
MSG_ZEROCOPY to struct ubuf_info_msgzc and remove duplicated fields.
This reduces the size from 48 bytes to just 16.

Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
---
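(Reviewer note, not for the commit log: the 48 -> 16 number assumes the
usual LP64 layout. Below is a quick userspace mock of the old and new
layouts, with stand-in types for refcount_t, struct user_struct and the
callback pointer; it only shows where the bytes go and is not kernel code.)

/* Layout sketch, assuming a typical LP64 ABI (x86-64/arm64). */
#include <assert.h>
#include <stdint.h>

struct old_ubuf_info {
	void (*callback)(void);				/* 8 bytes */
	union {						/* 16 bytes */
		struct { unsigned long desc; void *ctx; };
		struct { uint32_t id; uint16_t len;
			 uint16_t zerocopy:1; uint32_t bytelen; };
	};
	int refcnt;					/* 4 bytes (refcount_t stand-in) */
	uint8_t flags;					/* 1 byte + 3 padding */
	struct { void *user; unsigned int num_pg; } mmp; /* 12 bytes + 4 padding */
};

struct new_ubuf_info {
	void (*callback)(void);				/* 8 bytes */
	int refcnt;					/* 4 bytes */
	uint8_t flags;					/* 1 byte + 3 padding */
};

static_assert(sizeof(struct old_ubuf_info) == 48, "old ubuf_info");
static_assert(sizeof(struct new_ubuf_info) == 16, "new ubuf_info");

int main(void) { return 0; }
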
 include/linux/skbuff.h | 22 ++++------------------
 net/core/skbuff.c      | 38 +++++++++++++++++++++-----------------
 net/ipv4/ip_output.c   |  2 +-
 net/ipv4/tcp.c         |  2 +-
 net/ipv6/ip6_output.c  |  2 +-
 5 files changed, 28 insertions(+), 38 deletions(-)
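
(Also not for the commit log: to make the "let users decide how they want
to extend it" point concrete, the intended pattern is to embed struct
ubuf_info in a larger struct and convert back with container_of() in the
callback, which is what the MSG_ZEROCOPY code below does via
uarg_to_msgzc(). A minimal sketch follows; my_uarg, my_zc_callback and
notif_idx are invented names, while struct ubuf_info, container_of() and
refcount_dec_and_test() are the real kernel interfaces.)

#include <linux/container_of.h>
#include <linux/skbuff.h>

struct my_uarg {
	struct ubuf_info ubuf;	/* 16-byte core, must stay embedded */
	u64 notif_idx;		/* user-specific state lives out here */
};

static void my_zc_callback(struct sk_buff *skb, struct ubuf_info *uarg,
			   bool success)
{
	/* recover the containing struct, as uarg_to_msgzc() does below */
	struct my_uarg *m = container_of(uarg, struct my_uarg, ubuf);

	if (refcount_dec_and_test(&m->ubuf.refcnt))
		pr_debug("notif %llu done, success=%d\n",
			 (unsigned long long)m->notif_idx, success);
}

This keeps the shared 16-byte core on the hot path while each user pays
only for the state it actually adds.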

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f8ac3678dab8..afd7400d7f62 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -533,25 +533,8 @@ enum {
 struct ubuf_info {
        void (*callback)(struct sk_buff *, struct ubuf_info *,
                         bool zerocopy_success);
-       union {
-               struct {
-                       unsigned long desc;
-                       void *ctx;
-               };
-               struct {
-                       u32 id;
-                       u16 len;
-                       u16 zerocopy:1;
-                       u32 bytelen;
-               };
-       };
        refcount_t refcnt;
        u8 flags;
-
-       struct mmpin {
-               struct user_struct *user;
-               unsigned int num_pg;
-       } mmp;
 };
 
 struct ubuf_info_msgzc {
@@ -570,7 +553,10 @@ struct ubuf_info_msgzc {
                };
        };
 
-       struct mmpin mmp;
+       struct mmpin {
+               struct user_struct *user;
+               unsigned int num_pg;
+       } mmp;
 };
 
 #define skb_uarg(SKB)  ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 974bbbbe7138..b047a773acd7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1183,7 +1183,7 @@ EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
 
 static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
 {
-       struct ubuf_info *uarg;
+       struct ubuf_info_msgzc *uarg;
        struct sk_buff *skb;
 
        WARN_ON_ONCE(!in_task());
@@ -1201,19 +1201,19 @@ static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
                return NULL;
        }
 
-       uarg->callback = msg_zerocopy_callback;
+       uarg->ubuf.callback = msg_zerocopy_callback;
        uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
        uarg->len = 1;
        uarg->bytelen = size;
        uarg->zerocopy = 1;
-       uarg->flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
-       refcount_set(&uarg->refcnt, 1);
+       uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
+       refcount_set(&uarg->ubuf.refcnt, 1);
        sock_hold(sk);
 
-       return uarg;
+       return &uarg->ubuf;
 }
 
-static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
+static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
 {
        return container_of((void *)uarg, struct sk_buff, cb);
 }
@@ -1222,6 +1222,7 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
                                       struct ubuf_info *uarg)
 {
        if (uarg) {
+               struct ubuf_info_msgzc *uarg_zc;
                const u32 byte_limit = 1 << 19;         /* limit to a few TSO */
                u32 bytelen, next;
 
@@ -1237,8 +1238,9 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
                        return NULL;
                }
 
-               bytelen = uarg->bytelen + size;
-               if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
+               uarg_zc = uarg_to_msgzc(uarg);
+               bytelen = uarg_zc->bytelen + size;
+               if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
                        /* TCP can create new skb to attach new uarg */
                        if (sk->sk_type == SOCK_STREAM)
                                goto new_alloc;
@@ -1246,11 +1248,11 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
                }
 
                next = (u32)atomic_read(&sk->sk_zckey);
-               if ((u32)(uarg->id + uarg->len) == next) {
-                       if (mm_account_pinned_pages(&uarg->mmp, size))
+               if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
+                       if (mm_account_pinned_pages(&uarg_zc->mmp, size))
                                return NULL;
-                       uarg->len++;
-                       uarg->bytelen = bytelen;
+                       uarg_zc->len++;
+                       uarg_zc->bytelen = bytelen;
                        atomic_set(&sk->sk_zckey, ++next);
 
                        /* no extra ref when appending to datagram (MSG_MORE) */
@@ -1286,7 +1288,7 @@ static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
        return true;
 }
 
-static void __msg_zerocopy_callback(struct ubuf_info *uarg)
+static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
 {
        struct sk_buff *tail, *skb = skb_from_uarg(uarg);
        struct sock_exterr_skb *serr;
@@ -1339,19 +1341,21 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
                           bool success)
 {
-       uarg->zerocopy = uarg->zerocopy & success;
+       struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);
+
+       uarg_zc->zerocopy = uarg_zc->zerocopy & success;
 
        if (refcount_dec_and_test(&uarg->refcnt))
-               __msg_zerocopy_callback(uarg);
+               __msg_zerocopy_callback(uarg_zc);
 }
 EXPORT_SYMBOL_GPL(msg_zerocopy_callback);
 
 void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
 {
-       struct sock *sk = skb_from_uarg(uarg)->sk;
+       struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;
 
        atomic_dec(&sk->sk_zckey);
-       uarg->len--;
+       uarg_to_msgzc(uarg)->len--;
 
        if (have_uref)
                msg_zerocopy_callback(NULL, uarg, true);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d7bd1daf022b..546897a4b4fa 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1043,7 +1043,7 @@ static int __ip_append_data(struct sock *sk,
                                paged = true;
                                zc = true;
                        } else {
-                               uarg->zerocopy = 0;
+                               uarg_to_msgzc(uarg)->zerocopy = 0;
                                skb_zcopy_set(skb, uarg, &extra_uref);
                        }
                }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 970e9a2cca4a..3152da8f4763 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1239,7 +1239,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                        }
                        zc = sk->sk_route_caps & NETIF_F_SG;
                        if (!zc)
-                               uarg->zerocopy = 0;
+                               uarg_to_msgzc(uarg)->zerocopy = 0;
                }
        }
 
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 897ca4f9b791..6d4f01a0cf6e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1568,7 +1568,7 @@ static int __ip6_append_data(struct sock *sk,
                                paged = true;
                                zc = true;
                        } else {
-                               uarg->zerocopy = 0;
+                               uarg_to_msgzc(uarg)->zerocopy = 0;
                                skb_zcopy_set(skb, uarg, &extra_uref);
                        }
                }
-- 
2.37.0