[PATCH 2/2] net: fec: add xdp and page pool statistics
From: Shenwei Wang
Date: Mon Nov 07 2022 - 09:39:29 EST
Add XDP and page pool statistics and report them through "ethtool -S",
alongside the existing MIB counters.
To keep the implementation simple and compatible with 32-bit platforms,
use 32-bit integers for the per-queue XDP counters; they are summed
into the 64-bit ethtool values when the statistics are read.
Signed-off-by: Shenwei Wang <shenwei.wang@xxxxxxx>
---
drivers/net/ethernet/freescale/fec.h | 12 ++++
drivers/net/ethernet/freescale/fec_main.c | 70 ++++++++++++++++++++++-
2 files changed, 79 insertions(+), 3 deletions(-)
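
A note on the u32 choice, for reviewers (this sits below the '---' cut
line, so git am will not fold it into the commit message). With plain
u32 counters the single writer in the NAPI poll path just increments,
and an aligned 32-bit load is atomic on its own, at the cost of
wrapping at 2^32. The heavier alternative would be 64-bit per-queue
counters guarded by u64_stats_sync so a 32-bit reader never observes a
torn value. A minimal sketch of that alternative follows, for
comparison only; the struct and function names are hypothetical and
nothing below is part of this patch:

#include <linux/u64_stats_sync.h>

/* Hypothetical 64-bit per-queue counter; syncp must be initialized
 * once with u64_stats_init() before first use.
 */
struct fec_xdp_stats64 {
	struct u64_stats_sync syncp;
	u64 redirect;
};

/* Writer side (NAPI poll, one writer per queue). */
static void fec_xdp_stat_inc(struct fec_xdp_stats64 *s)
{
	u64_stats_update_begin(&s->syncp);
	s->redirect++;
	u64_stats_update_end(&s->syncp);
}

/* Reader side (ethtool): retry until a consistent snapshot is seen. */
static u64 fec_xdp_stat_read(const struct fec_xdp_stats64 *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->redirect;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}

With the patch applied, the new counters appear in "ethtool -S eth0"
next to the existing MIB counters.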
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 61e847b18343..e3159234886c 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -526,6 +526,17 @@ struct fec_enet_priv_txrx_info {
struct sk_buff *skb;
};
+enum {
+ RX_XDP_REDIRECT = 0,
+ RX_XDP_PASS,
+ RX_XDP_DROP,
+ RX_XDP_TX,
+ RX_XDP_TX_ERRORS,
+ TX_XDP_XMIT,
+ TX_XDP_XMIT_ERRORS,
+ XDP_STATS_TOTAL,
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -546,6 +557,7 @@ struct fec_enet_priv_rx_q {
/* page_pool */
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
+ u32 stats[XDP_STATS_TOTAL];
/* rx queue number, in the range 0-7 */
u8 id;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3fb870340c22..89fef370bc10 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1523,10 +1523,12 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
switch (act) {
case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
ret = FEC_ENET_XDP_PASS;
break;
case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
err = xdp_do_redirect(fep->netdev, xdp, prog);
if (!err) {
ret = FEC_ENET_XDP_REDIR;
@@ -1549,6 +1551,7 @@ fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
ret = FEC_ENET_XDP_CONSUMED;
page = virt_to_head_page(xdp->data);
page_pool_put_page(rxq->page_pool, page, sync, true);
@@ -2657,37 +2660,91 @@ static const struct fec_stat {
{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
};
-#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
+static const struct fec_xdp_stat {
+ char name[ETH_GSTRING_LEN];
+} fec_xdp_stats[XDP_STATS_TOTAL] = {
+ { "rx_xdp_redirect" }, /* RX_XDP_REDIRECT = 0, */
+ { "rx_xdp_pass" }, /* RX_XDP_PASS, */
+ { "rx_xdp_drop" }, /* RX_XDP_DROP, */
+ { "rx_xdp_tx" }, /* RX_XDP_TX, */
+ { "rx_xdp_tx_errors" }, /* RX_XDP_TX_ERRORS, */
+ { "tx_xdp_xmit" }, /* TX_XDP_XMIT, */
+ { "tx_xdp_xmit_errors" }, /* TX_XDP_XMIT_ERRORS, */
+};
+
+#define FEC_STATS_SIZE ((ARRAY_SIZE(fec_stats) + \
+ ARRAY_SIZE(fec_xdp_stats)) * sizeof(u64))
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- int i;
+ u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
+ int off = ARRAY_SIZE(fec_stats);
+ struct fec_enet_priv_rx_q *rxq;
+ int i, j;
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ xdp_stats[j] += rxq->stats[j];
+ }
+
+ for (i = 0; i < XDP_STATS_TOTAL; i++)
+ fep->ethtool_stats[i + off] = xdp_stats[i];
+}
+
+#ifdef CONFIG_PAGE_POOL_STATS
+static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ if (!rxq->page_pool)
+ continue;
+
+ page_pool_get_stats(rxq->page_pool, &stats);
+ }
+
+ page_pool_ethtool_stats_get(data, &stats);
}
static void fec_enet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ u64 *dst = data + FEC_STATS_SIZE / sizeof(u64);
if (netif_running(dev))
fec_enet_update_ethtool_stats(dev);
memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+
+ fec_enet_page_pool_stats(fep, dst);
}
static void fec_enet_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
+ int off = ARRAY_SIZE(fec_stats);
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
fec_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < ARRAY_SIZE(fec_xdp_stats); i++)
+ memcpy(data + (i + off) * ETH_GSTRING_LEN,
+ fec_xdp_stats[i].name, ETH_GSTRING_LEN);
+ off = (i + off) * ETH_GSTRING_LEN;
+ page_pool_ethtool_stats_get_strings(data + off);
+
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -2697,9 +2754,14 @@ static void fec_enet_get_strings(struct net_device *netdev,
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
+ int count;
+
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(fec_stats);
+ count = ARRAY_SIZE(fec_stats) + ARRAY_SIZE(fec_xdp_stats);
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+
case ETH_SS_TEST:
return net_selftest_get_count();
default:
@@ -2718,6 +2780,8 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
writel(0, fep->hwp + fec_stats[i].offset);
+ for (i = fep->num_rx_queues - 1; i >= 0; i--)
+ memset(fep->rx_queue[i]->stats, 0, sizeof(fep->rx_queue[i]->stats));
/* Don't disable MIB statistics counters */
writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}
--
2.34.1