@@ -699,17 +699,24 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
+					 unsigned int *offset,
					 gfp_t gfp)
 {
	struct device *dev = &bp->pdev->dev;
	struct page *page;
 
-	page = page_pool_dev_alloc_pages(rxr->page_pool);
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+						BNXT_RX_PAGE_SIZE);
+	} else {
+		page = page_pool_dev_alloc_pages(rxr->page_pool);
+		*offset = 0;
+	}
	if (!page)
		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
-				      DMA_ATTR_WEAK_ORDERING);
+	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
+				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
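
A note on the allocation split above: the page_pool fragment API hands out
sub-page chunks of a pooled page and reference-counts them internally, so the
driver only needs to remember the offset it was given. The sketch below shows
the same allocate-then-map pattern outside the driver; my_pool, MY_BUF_SIZE
and my_alloc_mapped_buf are illustrative names, not part of this patch, and
the header path assumes a kernel of this patch's vintage:

	#include <net/page_pool.h>
	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	#define MY_BUF_SIZE	SZ_32K	/* assumed sub-page buffer size */

	static struct page *my_alloc_mapped_buf(struct page_pool *my_pool,
						struct device *dev,
						dma_addr_t *mapping,
						unsigned int *offset)
	{
		struct page *page;

		if (PAGE_SIZE > MY_BUF_SIZE) {
			/* Carve a MY_BUF_SIZE fragment out of a pooled page;
			 * *offset is set to the fragment's byte offset.
			 */
			page = page_pool_dev_alloc_frag(my_pool, offset,
							MY_BUF_SIZE);
		} else {
			/* One buffer per page; no fragmentation needed. */
			page = page_pool_dev_alloc_pages(my_pool);
			*offset = 0;
		}
		if (!page)
			return NULL;

		/* Map only the fragment actually handed to the device. */
		*mapping = dma_map_page_attrs(dev, page, *offset, MY_BUF_SIZE,
					      DMA_FROM_DEVICE,
					      DMA_ATTR_WEAK_ORDERING);
		if (dma_mapping_error(dev, *mapping)) {
			page_pool_recycle_direct(my_pool, page);
			return NULL;
		}
		return page;
	}
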
@@ -749,15 +756,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
	dma_addr_t mapping;
 
	if (BNXT_RX_PAGE_MODE(bp)) {
+		unsigned int offset;
		struct page *page =
-			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
		if (!page)
			return -ENOMEM;
 
		mapping += bp->rx_dma_offset;
		rx_buf->data = page;
-		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
 
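
With sub-page buffers the fragment offset has to flow into data_ptr as well.
As a worked example, assuming the usual bnxt.h definition that caps
BNXT_RX_PAGE_SIZE at 32K (the RX BD length field is 16-bit, so a full 64K
buffer cannot be posted): on a 64K-page system each pooled page yields two
fragments, at offsets 0 and 0x8000, and the second buffer's data_ptr becomes
page_address(page) + 0x8000 + bp->rx_offset. Without the offset term, both
buffers would point into the first fragment.
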
@@ -817,7 +825,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
	unsigned int offset = 0;
 
	if (BNXT_RX_PAGE_MODE(bp)) {
-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
		if (!page)
			return -ENOMEM;
@@ -964,15 +972,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
-	skb = build_skb(page_address(page), PAGE_SIZE);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	if (!skb) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	skb_mark_for_recycle(skb);
-	skb_reserve(skb, bp->rx_dma_offset);
+	skb_reserve(skb, bp->rx_offset);
	__skb_put(skb, len);
 
	return skb;
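
Three related fixes in this hunk: the unmap length now matches the mapped
length; build_skb() is handed the start of this buffer, data_ptr -
bp->rx_offset, rather than page_address(page), which is no longer the same
address once a buffer can start mid-page; and skb_reserve() skips
bp->rx_offset, the headroom that data_ptr was advanced past at allocation
time, instead of bp->rx_dma_offset. A minimal sketch of the buffer layout
these calls assume (widths illustrative, error handling elided):

	/*  buf              buf + rx_offset          buf + BNXT_RX_PAGE_SIZE
	 *  |   headroom     |   packet data (len)   ...      tailroom   |
	 *  buf = data_ptr - bp->rx_offset, possibly at a nonzero page offset
	 */
	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
	skb_reserve(skb, bp->rx_offset);	/* skb->data now at data_ptr */
	__skb_put(skb, len);			/* expose the received bytes */
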
@@ -998,8 +1006,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 
	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);
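
The DMA API requires an unmap to use the same length as the corresponding
map; since __bnxt_alloc_rx_page() now maps BNXT_RX_PAGE_SIZE bytes, unmapping
PAGE_SIZE here would cover memory beyond the fragment and trip a
size-mismatch warning under CONFIG_DMA_API_DEBUG. The same sizing fix recurs
at the remaining unmap site in the ring-teardown path,
bnxt_free_one_rx_ring_skbs(), further down.
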
@@ -1012,7 +1020,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
	skb_mark_for_recycle(skb);
	off = (void *)data_ptr - page_address(page);
-	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);
 
@@ -1143,7 +1151,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 
	skb->data_len += total_frag_len;
	skb->len += total_frag_len;
-	skb->truesize += PAGE_SIZE * agg_bufs;
+	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
	return skb;
 }
 
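
truesize is supposed to reflect the memory a buffer actually consumes.
Continuing the 64K-page, 32K-fragment example from above: an skb carrying
four aggregation buffers holds 4 * 32K = 128K of pooled memory, but the old
accounting charged 4 * 64K = 256K, counting each shared page twice and
inflating socket memory accounting. The truesize argument to
skb_add_rx_frag() in bnxt_rx_page_skb() above gets the same correction.
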
@@ -2945,8 +2953,8 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
		rx_buf->data = NULL;
		if (BNXT_RX_PAGE_MODE(bp)) {
			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
-					     bp->rx_dir,
+			dma_unmap_page_attrs(&pdev->dev, mapping,
+					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
					     DMA_ATTR_WEAK_ORDERING);
			page_pool_recycle_direct(rxr->page_pool, data);
		} else {
@@ -3215,6 +3223,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
	pp.napi = &rxr->bnapi->napi;
	pp.dev = &bp->pdev->dev;
	pp.dma_dir = DMA_BIDIRECTIONAL;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
+		pp.flags |= PP_FLAG_PAGE_FRAG;
 
	rxr->page_pool = page_pool_create(&pp);
	if (IS_ERR(rxr->page_pool)) {
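
Fragment allocation must be enabled when the pool is created; on kernels of
this patch's vintage that is the PP_FLAG_PAGE_FRAG opt-in (later kernels
fold fragment support into the core and drop the flag). A minimal sketch of
creating such a pool; my_create_rx_pool and the ring_entries sizing are
illustrative, not from this patch:

	#include <net/page_pool.h>

	static struct page_pool *my_create_rx_pool(struct device *dev,
						   unsigned int ring_entries)
	{
		struct page_pool_params pp = { 0 };

		pp.pool_size = ring_entries;	/* one page per RX descriptor */
		pp.nid = dev_to_node(dev);	/* allocate near the device */
		pp.dev = dev;
		pp.dma_dir = DMA_BIDIRECTIONAL;	/* XDP may write to buffers */
		if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
			pp.flags |= PP_FLAG_PAGE_FRAG;	/* sub-page fragments */

		return page_pool_create(&pp);	/* ERR_PTR() on failure */
	}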