Commit f6974b4

skotur-brcm authored and kuba-moo committed
bnxt_en: Fix page pool logic for page size >= 64K
The RXBD length field on all bnxt chips is 16-bit, so we cannot support
a full page when the native page size is 64K or greater. The non-XDP
(non page pool) code path has logic to handle this, but the XDP page
pool code path does not. Add the missing logic to use
page_pool_dev_alloc_frag() to allocate 32K chunks if the page size is
64K or greater.

Fixes: 9f4b283 ("bnxt: XDP multibuffer enablement")
Link: https://lore.kernel.org/netdev/[email protected]/
Reviewed-by: Andy Gospodarek <[email protected]>
Signed-off-by: Somnath Kotur <[email protected]>
Signed-off-by: Michael Chan <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 3ff1617 commit f6974b4
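
For reference, the 32K cap in the message comes from how the driver
derives its RX buffer size. A sketch of the definition, paraphrased
from bnxt.h around this commit (treat the exact spelling as an
approximation rather than a quote):

/* The RXBD length field is 16-bit, so one buffer can never cover a
 * full 64K page; clamp the shift at 15, i.e. 32K per RX buffer.
 */
#if (PAGE_SHIFT > 15)
#define BNXT_RX_PAGE_SHIFT	15
#else
#define BNXT_RX_PAGE_SHIFT	PAGE_SHIFT
#endif
#define BNXT_RX_PAGE_SIZE	(1 << BNXT_RX_PAGE_SHIFT)

On a 64K-page system this makes PAGE_SIZE (65536) greater than
BNXT_RX_PAGE_SIZE (32768), which is exactly the condition the hunks
below test before switching to frag allocation.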

2 files changed, +29 −19 lines

drivers/net/ethernet/broadcom/bnxt/bnxt.c

Lines changed: 26 additions & 16 deletions
@@ -699,17 +699,24 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 struct bnxt_rx_ring_info *rxr,
+					 unsigned int *offset,
 					 gfp_t gfp)
 {
 	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
-	page = page_pool_dev_alloc_pages(rxr->page_pool);
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+						BNXT_RX_PAGE_SIZE);
+	} else {
+		page = page_pool_dev_alloc_pages(rxr->page_pool);
+		*offset = 0;
+	}
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
-				      DMA_ATTR_WEAK_ORDERING);
+	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
+				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 	if (dma_mapping_error(dev, *mapping)) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
@@ -749,15 +756,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	dma_addr_t mapping;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
+		unsigned int offset;
 		struct page *page =
-			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
 
 		mapping += bp->rx_dma_offset;
 		rx_buf->data = page;
-		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
 	} else {
 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
 
@@ -817,7 +825,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	unsigned int offset = 0;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
@@ -964,15 +972,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
-	skb = build_skb(page_address(page), PAGE_SIZE);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
 	skb_mark_for_recycle(skb);
-	skb_reserve(skb, bp->rx_dma_offset);
+	skb_reserve(skb, bp->rx_offset);
 	__skb_put(skb, len);
 
 	return skb;
@@ -998,8 +1006,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1012,7 +1020,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
 	skb_mark_for_recycle(skb);
 	off = (void *)data_ptr - page_address(page);
-	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
 	       payload + NET_IP_ALIGN);
 
@@ -1143,7 +1151,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 
 	skb->data_len += total_frag_len;
 	skb->len += total_frag_len;
-	skb->truesize += PAGE_SIZE * agg_bufs;
+	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
 	return skb;
 }
 
@@ -2945,8 +2953,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
 			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
-					     bp->rx_dir,
+			dma_unmap_page_attrs(&pdev->dev, mapping,
+					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
@@ -3215,6 +3223,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.napi = &rxr->bnapi->napi;
 	pp.dev = &bp->pdev->dev;
 	pp.dma_dir = DMA_BIDIRECTIONAL;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
+		pp.flags |= PP_FLAG_PAGE_FRAG;
 
 	rxr->page_pool = page_pool_create(&pp);
 	if (IS_ERR(rxr->page_pool)) {
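
The two halves of the bnxt.c change have to land together:
page_pool_dev_alloc_frag() is only valid on a pool created with
PP_FLAG_PAGE_FRAG, which is why bnxt_alloc_rx_page_pool() gains the
flag under the same PAGE_SIZE check. A minimal standalone sketch of
that pairing (ring_size and the error handling are illustrative
stand-ins, not the driver's actual setup):

	struct page_pool_params pp = { 0 };
	struct page_pool *pool;
	unsigned int offset = 0;
	struct page *page;

	pp.pool_size = ring_size;		/* illustrative */
	pp.dma_dir = DMA_BIDIRECTIONAL;
	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
		pp.flags |= PP_FLAG_PAGE_FRAG;	/* needed for ..._alloc_frag() */

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
		/* 64K+ pages: carve out a 32K chunk; the pool
		 * tracks the intra-page offset for us.
		 */
		page = page_pool_dev_alloc_frag(pool, &offset,
						BNXT_RX_PAGE_SIZE);
	else
		page = page_pool_dev_alloc_pages(pool);	/* whole page */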

drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c

Lines changed: 3 additions & 3 deletions
@@ -186,8 +186,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			u16 cons, u8 *data_ptr, unsigned int len,
 			struct xdp_buff *xdp)
 {
+	u32 buflen = BNXT_RX_PAGE_SIZE;
 	struct bnxt_sw_rx_bd *rx_buf;
-	u32 buflen = PAGE_SIZE;
 	struct pci_dev *pdev;
 	dma_addr_t mapping;
 	u32 offset;
@@ -303,7 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	rx_buf = &rxr->rx_buf_ring[cons];
 	mapping = rx_buf->mapping - bp->rx_dma_offset;
 	dma_unmap_page_attrs(&pdev->dev, mapping,
-			     PAGE_SIZE, bp->rx_dir,
+			     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 			     DMA_ATTR_WEAK_ORDERING);
 
 	/* if we are unable to allocate a new buffer, abort and reuse */
@@ -486,7 +486,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
 	}
 	xdp_update_skb_shared_info(skb, num_frags,
 				   sinfo->xdp_frags_size,
-				   PAGE_SIZE * sinfo->nr_frags,
+				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
 				   xdp_buff_is_frag_pfmemalloc(xdp));
 	return skb;
 }
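
A design note on all the dma_unmap_page_attrs() edits in both files:
the map side now maps exactly BNXT_RX_PAGE_SIZE bytes at a per-frag
offset, and the unmap size must mirror the map size. Schematically
(dev, page, offset, and dir stand in for the driver's own state):

	dma_addr_t mapping;

	mapping = dma_map_page_attrs(dev, page, offset, BNXT_RX_PAGE_SIZE,
				     dir, DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, mapping))
		return -ENOMEM;

	/* ... hardware DMA into the 32K chunk ... */

	dma_unmap_page_attrs(dev, mapping, BNXT_RX_PAGE_SIZE,
			     dir, DMA_ATTR_WEAK_ORDERING);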
