[PATCH] virtio-net: byte queue limit support

From: Jason Wang
Date: Tue Nov 20 2018 - 01:25:30 EST

Enable byte queue limits (BQL) on virtio-net when TX NAPI is in use.
Transmitted bytes are charged to the queue with netdev_tx_sent_queue(),
credited back with netdev_tx_completed_queue() once the corresponding
skbs are freed, and the accounting is reset on freeze. A per-skb flag
stored in skb->cb after the virtio-net header records whether the skb
was charged, since only the NAPI transmit path participates in BQL.

Signed-off-by: Jason Wang <jasowang@xxxxxxxxxx>
---
drivers/net/virtio_net.c | 46 ++++++++++++++++++++++++++++++++++++++--------
1 file changed, 38 insertions(+), 8 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 47979fc..8712c11 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -279,6 +279,14 @@ static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
         return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
 }
 
+
+static inline int *skb_cb_bql(struct sk_buff *skb)
+{
+        BUILD_BUG_ON(sizeof(struct virtio_net_hdr_mrg_rxbuf) +
+                     sizeof(int) > sizeof(skb->cb));
+        return (int *)(skb->cb + sizeof(struct virtio_net_hdr_mrg_rxbuf));
+}
+
 /*
  * private is used to chain pages for big packets, put the whole
  * most recent used list in the beginning for reuse
@@ -1325,12 +1333,14 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
         return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static void free_old_xmit_skbs(struct send_queue *sq,
+                               struct netdev_queue *txq)
 {
         struct sk_buff *skb;
         unsigned int len;
-        unsigned int packets = 0;
-        unsigned int bytes = 0;
+        unsigned int packets = 0, bql_packets = 0;
+        unsigned int bytes = 0, bql_bytes = 0;
+        int *bql;
 
         while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                 pr_debug("Sent skb %p\n", skb);
@@ -1338,6 +1348,12 @@ static void free_old_xmit_skbs(struct send_queue *sq)
                 bytes += skb->len;
                 packets++;
 
+                bql = skb_cb_bql(skb);
+                if (*bql) {
+                        bql_packets++;
+                        bql_bytes += skb->len;
+                }
+
                 dev_consume_skb_any(skb);
         }
 
@@ -1351,6 +1367,8 @@ static void free_old_xmit_skbs(struct send_queue *sq)
         sq->stats.bytes += bytes;
         sq->stats.packets += packets;
         u64_stats_update_end(&sq->stats.syncp);
+
+        netdev_tx_completed_queue(txq, bql_packets, bql_bytes);
 }
 
 static void virtnet_poll_cleantx(struct receive_queue *rq)
@@ -1364,7 +1382,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
                 return;
 
         if (__netif_tx_trylock(txq)) {
-                free_old_xmit_skbs(sq);
+                free_old_xmit_skbs(sq, txq);
                 __netif_tx_unlock(txq);
         }
 
@@ -1440,7 +1458,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
         struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
 
         __netif_tx_lock(txq, raw_smp_processor_id());
-        free_old_xmit_skbs(sq);
+        free_old_xmit_skbs(sq, txq);
         __netif_tx_unlock(txq);
 
         virtqueue_napi_complete(napi, sq->vq, 0);
@@ -1459,6 +1477,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
         int num_sg;
         unsigned hdr_len = vi->hdr_len;
         bool can_push;
+        int *bql = skb_cb_bql(skb);
 
         pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
@@ -1495,6 +1514,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                         return num_sg;
                 num_sg++;
         }
+
+        *bql = sq->napi.weight ? 1 : 0;
         return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
 }
 
@@ -1509,7 +1530,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
         bool use_napi = sq->napi.weight;
 
         /* Free up any pending old buffers before queueing new ones. */
-        free_old_xmit_skbs(sq);
+        free_old_xmit_skbs(sq, txq);
 
         if (use_napi && kick)
                 virtqueue_enable_cb_delayed(sq->vq);
@@ -1537,6 +1558,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                 nf_reset(skb);
         }
 
+        if (use_napi)
+                netdev_tx_sent_queue(txq, skb->len);
+
         /* If running out of space, stop queue to avoid getting packets that we
          * are then unable to transmit.
          * An alternative would be to force queuing layer to requeue the skb by
@@ -1552,7 +1576,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                 if (!use_napi &&
                     unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                         /* More just got used, free them then recheck. */
-                        free_old_xmit_skbs(sq);
+                        free_old_xmit_skbs(sq, txq);
                         if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                                 netif_start_subqueue(dev, qnum);
                                 virtqueue_disable_cb(sq->vq);
@@ -2275,8 +2299,14 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 
         if (netif_running(vi->dev)) {
                 for (i = 0; i < vi->max_queue_pairs; i++) {
+                        struct send_queue *sq = &vi->sq[i];
+                        struct netdev_queue *txq =
+                                netdev_get_tx_queue(vi->dev, i);
+
                         napi_disable(&vi->rq[i].napi);
-                        virtnet_napi_tx_disable(&vi->sq[i].napi);
+                        virtnet_napi_tx_disable(&sq->napi);
+                        if (sq->napi.weight)
+                                netdev_tx_reset_queue(txq);
                 }
         }
 }
--
1.8.3.1
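
A note for readers new to BQL: the three hooks used in this patch
(netdev_tx_sent_queue(), netdev_tx_completed_queue() and
netdev_tx_reset_queue()) follow the usual driver contract. The sketch
below is illustrative only; the my_* functions are placeholders and not
part of virtio-net.

/*
 * Minimal sketch of the generic BQL contract this patch wires up;
 * the my_* names are placeholders, not virtio-net code.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        /* ... hand the skb to the device ... */

        /* Charge the bytes to BQL while holding the tx lock. */
        netdev_tx_sent_queue(txq, skb->len);
        return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, unsigned int q,
                           unsigned int pkts, unsigned int bytes)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, q);

        /* Credit completed work; BQL sizes the in-flight byte limit
         * from these sent/completed pairs and may wake the queue.
         */
        netdev_tx_completed_queue(txq, pkts, bytes);
}

static void my_reset(struct net_device *dev, unsigned int q)
{
        /* After a device reset, in-flight skbs never complete, so the
         * counters must be resynchronized or the queue stalls.
         */
        netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}

The invariant is that every byte charged by netdev_tx_sent_queue() is
eventually matched by netdev_tx_completed_queue() or wiped by
netdev_tx_reset_queue(); the per-skb flag in this patch exists to
preserve that balance.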

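The skb_cb_bql() helper works because struct virtio_net_hdr_mrg_rxbuf
(12 bytes) plus an int fit within the 48-byte skb->cb scratch area. A
standalone model of that layout, using fake_* stand-in types sized to
match, can be compiled and run in userspace:

/* Standalone model of the skb->cb partitioning; the fake_* types are
 * stand-ins sized like the kernel structures (12 and 48 bytes), used
 * only to illustrate the BUILD_BUG_ON-style guard.
 */
#include <stdio.h>

struct fake_vnet_hdr {          /* stands in for virtio_net_hdr_mrg_rxbuf */
        unsigned char bytes[12];
};

struct fake_skb {
        char cb[48];            /* mirrors sk_buff::cb */
};

static int *cb_bql(struct fake_skb *skb)
{
        /* Same guard as the patch: header plus flag must fit in cb[]. */
        _Static_assert(sizeof(struct fake_vnet_hdr) + sizeof(int) <=
                       sizeof(((struct fake_skb *)0)->cb),
                       "cb too small for header + bql flag");
        return (int *)(skb->cb + sizeof(struct fake_vnet_hdr));
}

int main(void)
{
        struct fake_skb skb = { { 0 } };

        *cb_bql(&skb) = 1;      /* mark as BQL-accounted */
        printf("bql flag = %d\n", *cb_bql(&skb));
        return 0;
}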

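One design point implied by the diff: netdev_tx_sent_queue() is called
only when use_napi is set, so skbs queued while TX NAPI is disabled are
never charged to BQL. The per-skb flag written in xmit_skb() lets
free_old_xmit_skbs() skip exactly those skbs at completion time,
keeping the sent/completed byte counts balanced even if the NAPI weight
changes while packets are still in flight.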