Re: [PATCH v5 bpf-next 6/6] xsk: build skb by page (aka generic zerocopy xmit)

From: Alexander Lobakin
Date: Tue Feb 16 2021 - 12:12:51 EST


From: Alexander Lobakin <alobakin@xxxxx>
Date: Tue, 16 Feb 2021 14:35:02 +0000

> From: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
>
> This patch constructs the skb directly from umem pages to avoid the
> overhead of copying the data.
>
> The new path is gated on IFF_TX_SKB_NO_LINEAR: only when the netdev
> sets this flag in its priv_flags is the skb built directly from pages.
> If the device does not advertise the flag, the data still has to be
> copied into a freshly allocated linear skb.
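
(For context: a driver opts into this path purely via the priv_flag
added earlier in this series. A minimal, hypothetical sketch of the
driver side, with `foo` being a made-up driver:)

```c
/* Hypothetical driver setup: advertise that this netdev can transmit
 * skbs with no linear data, so the XSK generic path below may build
 * skbs directly from umem pages instead of copying.
 */
static void foo_setup(struct net_device *dev)
{
        dev->priv_flags |= IFF_TX_SKB_NO_LINEAR;
}
```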
>
> ---------------- Performance Testing ----------------
>
> The test environment is Aliyun ECS server.
> Test cmd:
> ```
> xdpsock -i eth0 -t -S -s <msg size>
> ```
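
(For readers unfamiliar with the sample: -t puts xdpsock into txonly
mode, -S forces the generic/SKB-mode XDP path that this patch touches,
and -s sets the Tx packet size in bytes.)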
>
> Test result data (Tx packets per second):
>
> size        64      512     1024     1500
> copy   1916747  1775988  1600203  1440054
> page   1974058  1953655  1945463  1904478
> percent   3.0%    10.0%   21.58%    32.3%
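
(The win growing with the frame size is expected: the copy path's cost
scales with the number of bytes copied, while the page path does a
roughly constant amount of per-descriptor work.)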
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@xxxxxxxxxxxxxxxxx>
> Reviewed-by: Dust Li <dust.li@xxxxxxxxxxxxxxxxx>
> [ alobakin:
> - expand subject to make it clearer;
> - improve skb->truesize calculation;
> - reserve some headroom in skb for drivers;
> - tailroom is not needed as skb is non-linear ]
> Signed-off-by: Alexander Lobakin <alobakin@xxxxx>
> Acked-by: Magnus Karlsson <magnus.karlsson@xxxxxxxxx>
> ---
> net/xdp/xsk.c | 119 ++++++++++++++++++++++++++++++++++++++++----------
> 1 file changed, 95 insertions(+), 24 deletions(-)
>
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index 143979ea4165..ff7bd06e1241 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -445,6 +445,96 @@ static void xsk_destruct_skb(struct sk_buff *skb)
>          sock_wfree(skb);
>  }
>
> +static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
> +                                              struct xdp_desc *desc)
> +{
> +        struct xsk_buff_pool *pool = xs->pool;
> +        u32 hr, len, offset, copy, copied;
> +        struct sk_buff *skb;
> +        struct page *page;
> +        void *buffer;
> +        int err, i;
> +        u64 addr;
> +
> +        hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
> +
> +        skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
> +        if (unlikely(!skb))
> +                return ERR_PTR(err);
> +
> +        skb_reserve(skb, hr);
> +
> +        addr = desc->addr;
> +        len = desc->len;
> +
> +        buffer = xsk_buff_raw_get_data(pool, addr);
> +        offset = offset_in_page(buffer);
> +        addr = buffer - pool->addrs;
> +
> +        for (copied = 0, i = 0; copied < len; i++) {
> +                page = pool->umem->pgs[addr >> PAGE_SHIFT];
> +                get_page(page);
> +
> +                copy = min_t(u32, PAGE_SIZE - offset, len - copied);
> +                skb_fill_page_desc(skb, i, page, offset, copy);
> +
> +                copied += copy;
> +                addr += copy;
> +                offset = 0;
> +        }
> +
> +        skb->len += len;
> +        skb->data_len += len;
> +        skb->truesize += pool->unaligned ? len : pool->chunk_size;
> +
> +        refcount_add(skb->truesize, &xs->sk.sk_wmem_alloc);

Meh, there's a refcount leak here I accidentally introduced in v4.
Sorry for that, I'll upload v6 in just a moment.
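
For the archive, the fix I have in mind for v6 is roughly the
following: sock_alloc_send_skb() has already charged the initial
skb->truesize to sk_wmem_alloc via skb_set_owner_w(), so only the
truesize delta of the payload pages should be added here, not the
accumulated total:

```c
/* Sketch of the planned v6 change (not the final patch): account only
 * the delta that the payload pages add on top of what
 * sock_alloc_send_skb() already charged.
 */
u32 ts = pool->unaligned ? len : pool->chunk_size;

skb->len += len;
skb->data_len += len;
skb->truesize += ts;

refcount_add(ts, &xs->sk.sk_wmem_alloc);
```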

> +        return skb;
> +}
> +
> +static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
> +                                     struct xdp_desc *desc)
> +{
> +        struct net_device *dev = xs->dev;
> +        struct sk_buff *skb;
> +
> +        if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
> +                skb = xsk_build_skb_zerocopy(xs, desc);
> +                if (IS_ERR(skb))
> +                        return skb;
> +        } else {
> +                u32 hr, tr, len;
> +                void *buffer;
> +                int err;
> +
> +                hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
> +                tr = dev->needed_tailroom;
> +                len = desc->len;
> +
> +                skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
> +                if (unlikely(!skb))
> +                        return ERR_PTR(err);
> +
> +                skb_reserve(skb, hr);
> +                skb_put(skb, len);
> +
> +                buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
> +                err = skb_store_bits(skb, 0, buffer, len);
> +                if (unlikely(err)) {
> +                        kfree_skb(skb);
> +                        return ERR_PTR(err);
> +                }
> +        }
> +
> +        skb->dev = dev;
> +        skb->priority = xs->sk.sk_priority;
> +        skb->mark = xs->sk.sk_mark;
> +        skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
> +        skb->destructor = xsk_destruct_skb;
> +
> +        return skb;
> +}
> +
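
One more note for driver folks, while we're here: with
IFF_TX_SKB_NO_LINEAR set, skbs produced by xsk_build_skb_zerocopy()
carry all of their data in frags, so the driver's .ndo_start_xmit must
not assume a non-empty head. A minimal sketch of what that means, with
a made-up foo driver and a made-up foo_map_buf() standing in for the
driver's DMA-mapping helper:

```c
/* Hypothetical .ndo_start_xmit excerpt: skb_headlen() may be 0 for
 * skbs built by the XSK generic path above, so map the linear part
 * only when present and then walk the frags.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned int i;

        if (skb_headlen(skb))
                foo_map_buf(dev, skb->data, skb_headlen(skb));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                foo_map_buf(dev, skb_frag_address(frag),
                            skb_frag_size(frag));
        }

        return NETDEV_TX_OK;
}
```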
>  static int xsk_generic_xmit(struct sock *sk)
>  {
>          struct xdp_sock *xs = xdp_sk(sk);
> @@ -454,56 +544,37 @@ static int xsk_generic_xmit(struct sock *sk)
>          struct sk_buff *skb;
>          unsigned long flags;
>          int err = 0;
> -        u32 hr, tr;
>
>          mutex_lock(&xs->mutex);
>
>          if (xs->queue_id >= xs->dev->real_num_tx_queues)
>                  goto out;
>
> -        hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
> -        tr = xs->dev->needed_tailroom;
> -
>          while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
> -                char *buffer;
> -                u64 addr;
> -                u32 len;
> -
>                  if (max_batch-- == 0) {
>                          err = -EAGAIN;
>                          goto out;
>                  }
>
> -                len = desc.len;
> -                skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
> -                if (unlikely(!skb))
> +                skb = xsk_build_skb(xs, &desc);
> +                if (IS_ERR(skb)) {
> +                        err = PTR_ERR(skb);
>                          goto out;
> +                }
>
> -                skb_reserve(skb, hr);
> -                skb_put(skb, len);
> -
> -                addr = desc.addr;
> -                buffer = xsk_buff_raw_get_data(xs->pool, addr);
> -                err = skb_store_bits(skb, 0, buffer, len);
>                  /* This is the backpressure mechanism for the Tx path.
>                   * Reserve space in the completion queue and only proceed
>                   * if there is space in it. This avoids having to implement
>                   * any buffering in the Tx path.
>                   */
>                  spin_lock_irqsave(&xs->pool->cq_lock, flags);
> -                if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
> +                if (xskq_prod_reserve(xs->pool->cq)) {
>                          spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
>                          kfree_skb(skb);
>                          goto out;
>                  }
>                  spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
>
> -                skb->dev = xs->dev;
> -                skb->priority = sk->sk_priority;
> -                skb->mark = sk->sk_mark;
> -                skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
> -                skb->destructor = xsk_destruct_skb;
> -
>                  err = __dev_direct_xmit(skb, xs->queue_id);
>                  if (err == NETDEV_TX_BUSY) {
>                          /* Tell user-space to retry the send */
> --
> 2.30.1

Al