[net-next v14 05/12] net: mtip: Add buffers management functions to the L2 switch driver

From: Lukasz Majewski
Date: Tue Jul 01 2025 - 07:58:02 EST


This patch provides the content of the buffer management functions for
the MTIP L2 switch: RX descriptors are backed by page_pool pages (mapped
and synced for the device by the pool), while TX uses kmalloc()'ed
bounce buffers.

Signed-off-by: Lukasz Majewski <lukma@xxxxxxx>
---
Changes for v14:
- New patch - created by splitting code out of the large (i.e. v13
and earlier) MTIP driver patch
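
For reviewers, a minimal sketch of how these helpers are expected to be
wired up from the ndo_open()/ndo_stop() paths. The callers land in a
later patch of this series, so the function names below are assumptions
for illustration, not part of this patch:

	/* Hypothetical callers - names assumed, not in this patch. */
	static int mtip_open(struct net_device *dev)
	{
		int ret;

		ret = mtip_alloc_buffers(dev);
		if (ret)
			return ret;
		/* ... start DMA, enable NAPI, etc. ... */
		return 0;
	}

	static int mtip_stop(struct net_device *dev)
	{
		/* ... stop DMA, disable NAPI, etc. ... */
		mtip_free_buffers(dev);
		return 0;
	}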
---
.../net/ethernet/freescale/mtipsw/mtipl2sw.c | 85 +++++++++++++++++++
1 file changed, 85 insertions(+)

diff --git a/drivers/net/ethernet/freescale/mtipsw/mtipl2sw.c b/drivers/net/ethernet/freescale/mtipsw/mtipl2sw.c
index 389aa8a20cd4..555d25a8cde0 100644
--- a/drivers/net/ethernet/freescale/mtipsw/mtipl2sw.c
+++ b/drivers/net/ethernet/freescale/mtipsw/mtipl2sw.c
@@ -882,11 +882,96 @@ static void mtip_get_drvinfo(struct net_device *dev,

 static void mtip_free_buffers(struct net_device *dev)
 {
+	struct mtip_ndev_priv *priv = netdev_priv(dev);
+	struct switch_enet_private *fep = priv->fep;
+	int i;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		/* Skip entries that were never populated - this function
+		 * is also run from the mtip_alloc_buffers() error path.
+		 */
+		if (!fep->page[i])
+			continue;
+
+		page_pool_put_full_page(fep->page_pool, fep->page[i], false);
+		fep->page[i] = NULL;
+	}
+
+	page_pool_destroy(fep->page_pool);
+	fep->page_pool = NULL;
+
+	for (i = 0; i < TX_RING_SIZE; i++)
+		kfree(fep->tx_bounce[i]);
+}
+
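+/* One pool page backs each RX descriptor.  With PP_FLAG_DMA_MAP |
+ * PP_FLAG_DMA_SYNC_DEV the pool maps the pages and syncs them for the
+ * device, so the RX path does not need to call the DMA API itself.
+ */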
+static int mtip_create_page_pool(struct switch_enet_private *fep, int size)
+{
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = size,
+		.nid = dev_to_node(&fep->pdev->dev),
+		.dev = &fep->pdev->dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = 0,
+		.max_len = MTIP_SWITCH_RX_FRSIZE,
+	};
+	int ret = 0;
+
+	fep->page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(fep->page_pool)) {
+		ret = PTR_ERR(fep->page_pool);
+		fep->page_pool = NULL;
+	}
+
+	return ret;
 }
 
 static int mtip_alloc_buffers(struct net_device *dev)
 {
+	struct mtip_ndev_priv *priv = netdev_priv(dev);
+	struct switch_enet_private *fep = priv->fep;
+	struct cbd_t *bdp;
+	struct page *page;
+	int i, ret;
+
+	ret = mtip_create_page_pool(fep, RX_RING_SIZE);
+	if (ret < 0) {
+		dev_err(&fep->pdev->dev, "Failed to create page pool\n");
+		return ret;
+	}
+
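+	/* Attach one pool page to each RX descriptor and mark it
+	 * BD_ENET_RX_EMPTY so the hardware may fill it.
+	 */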
+	bdp = fep->rx_bd_base;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		page = page_pool_dev_alloc_pages(fep->page_pool);
+		if (!page) {
+			dev_err(&fep->pdev->dev,
+				"Failed to allocate page for rx buffer\n");
+			goto err;
+		}
+
+		bdp->cbd_bufaddr = page_pool_get_dma_addr(page);
+		fep->page[i] = page;
+
+		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		bdp++;
+	}
+
+	mtip_set_last_buf_to_wrap(bdp);
+
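+	/* TX uses kmalloc()'ed bounce buffers; descriptors start out
+	 * unowned (cbd_sc == 0) and get their DMA address at xmit time.
+	 */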
+	bdp = fep->tx_bd_base;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		fep->tx_bounce[i] = kmalloc(MTIP_SWITCH_TX_FRSIZE, GFP_KERNEL);
+		if (!fep->tx_bounce[i])
+			goto err;
+
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	mtip_set_last_buf_to_wrap(bdp);
+
 	return 0;
+
+err:
+	mtip_free_buffers(dev);
+	return -ENOMEM;
 }
 
 static int mtip_rx_napi(struct napi_struct *napi, int budget)
--
2.39.5