@@ -4708,7 +4708,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
 /* Changing allocation mode of RX rings.
  * TODO: Update when extending xdp_rxq_info to support allocation modes.
  */
-int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+static void __bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 {
 	struct net_device *dev = bp->dev;
@@ -4729,15 +4729,30 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
 			bp->rx_skb_func = bnxt_rx_page_skb;
 		}
 		bp->rx_dir = DMA_BIDIRECTIONAL;
-		/* Disable LRO or GRO_HW */
-		netdev_update_features(dev);
 	} else {
 		dev->max_mtu = bp->max_mtu;
 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
 		bp->rx_dir = DMA_FROM_DEVICE;
 		bp->rx_skb_func = bnxt_rx_skb;
 	}
-	return 0;
+}
+
+void bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+	__bnxt_set_rx_skb_mode(bp, page_mode);
+
+	if (!page_mode) {
+		int rx, tx;
+
+		bnxt_get_max_rings(bp, &rx, &tx, true);
+		if (rx > 1) {
+			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+			bp->dev->hw_features |= NETIF_F_LRO;
+		}
+	}
+
+	/* Update LRO and GRO_HW availability */
+	netdev_update_features(bp->dev);
 }
 
 static void bnxt_free_vnic_attributes(struct bnxt *bp)
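After this split, __bnxt_set_rx_skb_mode() only switches the RX buffer handling and DMA direction, while the bnxt_set_rx_skb_mode() wrapper additionally restores aggregation rings and NETIF_F_LRO when leaving page mode, then re-runs netdev_update_features(). A minimal sketch of a runtime (XDP) caller; bnxt_example_xdp_set() is a hypothetical name, not a function in this driver:

/* Hypothetical caller sketch, not part of the patch. Runtime paths
 * want the wrapper: when an XDP program is detached (page_mode is
 * false), it re-enables agg rings and NETIF_F_LRO before recomputing
 * LRO/GRO_HW availability via netdev_update_features().
 */
static int bnxt_example_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	bool page_mode = !!prog;

	bnxt_set_rx_skb_mode(bp, page_mode);
	return 0;
}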
@@ -16259,7 +16274,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (bp->max_fltr < BNXT_MAX_FLTR)
 		bp->max_fltr = BNXT_MAX_FLTR;
 	bnxt_init_l2_fltr_tbl(bp);
-	bnxt_set_rx_skb_mode(bp, false);
+	__bnxt_set_rx_skb_mode(bp, false);
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
 	bnxt_rdma_aux_device_init(bp);
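The probe path keeps the bare helper, presumably because bnxt_init_one() runs before register_netdev(), while dev->hw_features is still being assembled, so re-evaluating features at this point would be premature. A condensed, hypothetical sketch of that ordering; example_probe_order() is not a real function, and error handling plus most setup steps are omitted:

/* Hypothetical, condensed view of the probe-time ordering. */
static int example_probe_order(struct bnxt *bp, struct net_device *dev)
{
	__bnxt_set_rx_skb_mode(bp, false);	/* choose SKB mode only */
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	/* dev->hw_features is finalized later in probe; features take
	 * effect once the netdev is registered.
	 */
	return register_netdev(dev);
}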