Skip to content

Commit

Permalink
Merge branch 'bnxt_en-support-header-page-pool-in-queue-api'
Browse files Browse the repository at this point in the history
David Wei says:

====================
bnxt_en: support header page pool in queue API

Commit 7ed816b ("eth: bnxt: use page pool for head frags") added a
separate page pool for header frags. Since then, header frags such as
rxr->tpa_info.data are allocated from this header page pool.

The queue API did not properly handle rxr->tpa_info, so using the
queue API to, e.g., reset any queues resulted in pages being returned
to the incorrect page pool, causing inflight != 0 warnings.

Fix this bug by properly allocating/freeing tpa_info and copying/freeing
head_pool in the queue API implementation.

The 1st patch is a prep patch that refactors helpers out to be used by
the implementation patch later.

The 2nd patch is a drive-by refactor. Happy to take it out and re-send
to net-next if there are any objections.

The 3rd patch is the implementation patch that will properly alloc/free
rxr->tpa_info.
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
  • Loading branch information
kuba-moo committed Dec 5, 2024
2 parents 8588c99 + bd649c5 commit 5f4d035
Showing 1 changed file with 129 additions and 76 deletions.
205 changes: 129 additions & 76 deletions drivers/net/ethernet/broadcom/bnxt/bnxt.c
Original file line number Diff line number Diff line change
Expand Up @@ -3421,15 +3421,11 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
}
}

static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct bnxt_tpa_idx_map *map;
int i;

if (!rxr->rx_tpa)
goto skip_rx_tpa_free;

for (i = 0; i < bp->max_tpa; i++) {
struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
u8 *data = tpa_info->data;
Expand All @@ -3440,6 +3436,17 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
tpa_info->data = NULL;
page_pool_free_va(rxr->head_pool, data, false);
}
}

static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
struct bnxt_tpa_idx_map *map;

if (!rxr->rx_tpa)
goto skip_rx_tpa_free;

bnxt_free_one_tpa_info_data(bp, rxr);

skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
Expand Down Expand Up @@ -3467,7 +3474,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
return;

for (i = 0; i < bp->rx_nr_rings; i++)
bnxt_free_one_rx_ring_skbs(bp, i);
bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}

static void bnxt_free_skbs(struct bnxt *bp)
Expand Down Expand Up @@ -3608,29 +3615,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}

/* Release all TPA bookkeeping for one RX ring: the TPA index map, each
 * slot's aggregation completion array, and the rx_tpa array itself.
 * Every freed pointer is reset to NULL so a repeated call is harmless.
 */
static void bnxt_free_one_tpa_info(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr)
{
	int slot;

	kfree(rxr->rx_tpa_idx_map);
	rxr->rx_tpa_idx_map = NULL;

	if (rxr->rx_tpa) {
		for (slot = 0; slot < bp->max_tpa; slot++) {
			kfree(rxr->rx_tpa[slot].agg_arr);
			rxr->rx_tpa[slot].agg_arr = NULL;
		}
	}

	kfree(rxr->rx_tpa);
	rxr->rx_tpa = NULL;
}

static void bnxt_free_tpa_info(struct bnxt *bp)
{
int i, j;
int i;

for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];

kfree(rxr->rx_tpa_idx_map);
rxr->rx_tpa_idx_map = NULL;
if (rxr->rx_tpa) {
for (j = 0; j < bp->max_tpa; j++) {
kfree(rxr->rx_tpa[j].agg_arr);
rxr->rx_tpa[j].agg_arr = NULL;
}
}
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
bnxt_free_one_tpa_info(bp, rxr);
}
}

/* Allocate the TPA bookkeeping for one RX ring: the rx_tpa slot array
 * and, on P5+ chips only, a per-slot aggregation completion array plus
 * the TPA index map.
 *
 * Returns 0 on success or -ENOMEM.  On failure, allocations made so far
 * are left in place — NOTE(review): callers appear to unwind via
 * bnxt_free_one_tpa_info(); confirm against the call sites.
 */
static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr)
{
	int i;

	rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
			      GFP_KERNEL);
	if (!rxr->rx_tpa)
		return -ENOMEM;

	/* Pre-P5 chips do not use the agg arrays or the index map. */
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		return 0;

	for (i = 0; i < bp->max_tpa; i++) {
		struct rx_agg_cmp *agg_arr;

		agg_arr = kcalloc(MAX_SKB_FRAGS, sizeof(*agg_arr),
				  GFP_KERNEL);
		if (!agg_arr)
			return -ENOMEM;
		rxr->rx_tpa[i].agg_arr = agg_arr;
	}

	rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
				      GFP_KERNEL);
	if (!rxr->rx_tpa_idx_map)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
int i, j;
int i, rc;

bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
Expand All @@ -3641,25 +3683,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)

for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct rx_agg_cmp *agg;

rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
GFP_KERNEL);
if (!rxr->rx_tpa)
return -ENOMEM;

if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
for (j = 0; j < bp->max_tpa; j++) {
agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
if (!agg)
return -ENOMEM;
rxr->rx_tpa[j].agg_arr = agg;
}
rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
GFP_KERNEL);
if (!rxr->rx_tpa_idx_map)
return -ENOMEM;
rc = bnxt_alloc_one_tpa_info(bp, rxr);
if (rc)
return rc;
}
return 0;
}
Expand All @@ -3683,7 +3710,7 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
xdp_rxq_info_unreg(&rxr->xdp_rxq);

page_pool_destroy(rxr->page_pool);
if (rxr->page_pool != rxr->head_pool)
if (bnxt_separate_head_pool())
page_pool_destroy(rxr->head_pool);
rxr->page_pool = rxr->head_pool = NULL;

Expand Down Expand Up @@ -3737,6 +3764,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
return PTR_ERR(pool);
}

/* Allocate the aggregation-buffer bitmap for one RX ring.  The bitmap
 * holds one bit per aggregation ring entry (ring mask + 1 entries,
 * packed 8 bits per byte).  Returns 0 on success or -ENOMEM.
 */
static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	u16 nbytes;

	rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
	nbytes = rxr->rx_agg_bmap_size / 8;

	rxr->rx_agg_bmap = kzalloc(nbytes, GFP_KERNEL);
	if (!rxr->rx_agg_bmap)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
int numa_node = dev_to_node(&bp->pdev->dev);
Expand Down Expand Up @@ -3781,19 +3821,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)

ring->grp_idx = i;
if (agg_rings) {
u16 mem_size;

ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;

ring->grp_idx = i;
rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
mem_size = rxr->rx_agg_bmap_size / 8;
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;
rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
if (rc)
return rc;
}
}
if (bp->flags & BNXT_FLAG_TPA)
Expand Down Expand Up @@ -4268,10 +4304,31 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
rxr->rx_agg_prod = prod;
}

static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr)
{
dma_addr_t mapping;
u8 *data;
int i;

for (i = 0; i < bp->max_tpa; i++) {
data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
GFP_KERNEL);
if (!data)
return -ENOMEM;

rxr->rx_tpa[i].data = data;
rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
rxr->rx_tpa[i].mapping = mapping;
}

return 0;
}

static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
int i;
int rc;

bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);

Expand All @@ -4281,19 +4338,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);

if (rxr->rx_tpa) {
dma_addr_t mapping;
u8 *data;

for (i = 0; i < bp->max_tpa; i++) {
data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
GFP_KERNEL);
if (!data)
return -ENOMEM;

rxr->rx_tpa[i].data = data;
rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
rxr->rx_tpa[i].mapping = mapping;
}
rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
if (rc)
return rc;
}
return 0;
}
Expand Down Expand Up @@ -13663,7 +13710,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_reset_task(bp, true);
break;
}
bnxt_free_one_rx_ring_skbs(bp, i);
bnxt_free_one_rx_ring_skbs(bp, rxr);
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
Expand Down Expand Up @@ -15293,19 +15340,6 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};

static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
u16 mem_size;

rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
mem_size = rxr->rx_agg_bmap_size / 8;
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
if (!rxr->rx_agg_bmap)
return -ENOMEM;

return 0;
}

static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
{
struct bnxt_rx_ring_info *rxr, *clone;
Expand Down Expand Up @@ -15354,25 +15388,37 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
goto err_free_rx_agg_ring;
}

if (bp->flags & BNXT_FLAG_TPA) {
rc = bnxt_alloc_one_tpa_info(bp, clone);
if (rc)
goto err_free_tpa_info;
}

bnxt_init_one_rx_ring_rxbd(bp, clone);
bnxt_init_one_rx_agg_ring_rxbd(bp, clone);

bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_alloc_one_rx_ring_page(bp, clone, idx);
if (bp->flags & BNXT_FLAG_TPA)
bnxt_alloc_one_tpa_info_data(bp, clone);

return 0;

err_free_tpa_info:
bnxt_free_one_tpa_info(bp, clone);
err_free_rx_agg_ring:
bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
clone->page_pool->p.napi = NULL;
page_pool_destroy(clone->page_pool);
if (bnxt_separate_head_pool())
page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
clone->head_pool = NULL;
return rc;
}

Expand All @@ -15382,13 +15428,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ring_struct *ring;

bnxt_free_one_rx_ring(bp, rxr);
bnxt_free_one_rx_agg_ring(bp, rxr);
bnxt_free_one_rx_ring_skbs(bp, rxr);

xdp_rxq_info_unreg(&rxr->xdp_rxq);

page_pool_destroy(rxr->page_pool);
if (bnxt_separate_head_pool())
page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
rxr->head_pool = NULL;

ring = &rxr->rx_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
Expand Down Expand Up @@ -15470,7 +15518,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
rxr->rx_agg_prod = clone->rx_agg_prod;
rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
rxr->rx_next_cons = clone->rx_next_cons;
rxr->rx_tpa = clone->rx_tpa;
rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
rxr->page_pool = clone->page_pool;
rxr->head_pool = clone->head_pool;
rxr->xdp_rxq = clone->xdp_rxq;

bnxt_copy_rx_ring(bp, rxr, clone);
Expand Down Expand Up @@ -15529,6 +15580,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
page_pool_disable_direct_recycling(rxr->page_pool);
if (bnxt_separate_head_pool())
page_pool_disable_direct_recycling(rxr->head_pool);

memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
Expand Down

0 comments on commit 5f4d035

Please sign in to comment.