@@ -553,34 +553,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
  * @xdp: xdp_buff used as input to the XDP program
  * @xdp_prog: XDP program to run
  * @xdp_ring: ring to be used for XDP_TX action
+ * @rx_buf: Rx buffer to store the XDP action
  *
  * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
  */
-static int
+static void
 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
-	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+	    struct ice_rx_buf *rx_buf)
 {
-	int err;
+	unsigned int ret = ICE_XDP_PASS;
 	u32 act;
 
+	if (!xdp_prog)
+		goto exit;
+
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 	switch (act) {
 	case XDP_PASS:
-		return ICE_XDP_PASS;
+		break;
 	case XDP_TX:
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_lock(&xdp_ring->tx_lock);
-		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+		ret = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
 		if (static_branch_unlikely(&ice_xdp_locking_key))
 			spin_unlock(&xdp_ring->tx_lock);
-		if (err == ICE_XDP_CONSUMED)
+		if (ret == ICE_XDP_CONSUMED)
 			goto out_failure;
-		return err;
+		break;
 	case XDP_REDIRECT:
-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-		if (err)
+		if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
 			goto out_failure;
-		return ICE_XDP_REDIR;
+		ret = ICE_XDP_REDIR;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
@@ -589,8 +594,10 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
-		return ICE_XDP_CONSUMED;
+		ret = ICE_XDP_CONSUMED;
 	}
+exit:
+	rx_buf->act = ret;
 }
 
 /**
@@ -855,9 +862,6 @@ ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 		return;
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
 			rx_buf->page_offset, size, truesize);
-
-	/* page is being used so we must update the page offset */
-	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 }
 
 /**
@@ -970,9 +974,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 	if (metasize)
 		skb_metadata_set(skb, metasize);
 
-	/* buffer is used by skb, update page_offset */
-	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
-
 	return skb;
 }
 
@@ -1023,14 +1024,13 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 #endif
 		skb_add_rx_frag(skb, 0, rx_buf->page,
 				rx_buf->page_offset + headlen, size, truesize);
-		/* buffer is used by skb, update page_offset */
-		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 	} else {
-		/* buffer is unused, reset bias back to rx_buf; data was copied
-		 * onto skb's linear part so there's no need for adjusting
-		 * page offset and we can reuse this buffer as-is
+		/* buffer is unused, change the act that should be taken later
+		 * on; data was copied onto skb's linear part so there's no
+		 * need for adjusting page offset and we can reuse this buffer
+		 * as-is
 		 */
-		rx_buf->pagecnt_bias++;
+		rx_buf->act = ICE_XDP_CONSUMED;
 	}
 
 	return skb;
@@ -1084,11 +1084,12 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 	unsigned int offset = rx_ring->rx_offset;
 	struct xdp_buff *xdp = &rx_ring->xdp;
 	struct ice_tx_ring *xdp_ring = NULL;
-	unsigned int xdp_res, xdp_xmit = 0;
 	struct sk_buff *skb = rx_ring->skb;
 	struct bpf_prog *xdp_prog = NULL;
 	u32 ntc = rx_ring->next_to_clean;
 	u32 cnt = rx_ring->count;
+	u32 cached_ntc = ntc;
+	u32 xdp_xmit = 0;
 	bool failure;
 
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
@@ -1137,7 +1138,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 			ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
 			if (++ntc == cnt)
 				ntc = 0;
-			ice_put_rx_buf(rx_ring, NULL);
 			cleaned_count++;
 			continue;
 		}
@@ -1164,25 +1164,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
 #endif
 
-		if (!xdp_prog)
+		ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+		if (rx_buf->act == ICE_XDP_PASS)
 			goto construct_skb;
-
-		xdp_res = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring);
-		if (!xdp_res)
-			goto construct_skb;
-		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
-			xdp_xmit |= xdp_res;
-			ice_rx_buf_adjust_pg_offset(rx_buf, xdp->frame_sz);
-		} else {
-			rx_buf->pagecnt_bias++;
-		}
 		total_rx_bytes += size;
 		total_rx_pkts++;
 
 		cleaned_count++;
 		if (++ntc == cnt)
 			ntc = 0;
-		ice_put_rx_buf(rx_ring, rx_buf);
 		continue;
 construct_skb:
 		if (skb) {
@@ -1203,7 +1193,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
 		if (++ntc == cnt)
 			ntc = 0;
-		ice_put_rx_buf(rx_ring, rx_buf);
 		cleaned_count++;
 
 		/* skip if it is NOP desc */
@@ -1243,6 +1232,22 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 		total_rx_pkts++;
 	}
 
+	while (cached_ntc != ntc) {
+		struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+
+		if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+			xdp_xmit |= buf->act;
+		} else if (buf->act & ICE_XDP_CONSUMED) {
+			buf->pagecnt_bias++;
+		} else if (buf->act == ICE_XDP_PASS) {
+			ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+		}
+
+		ice_put_rx_buf(rx_ring, buf);
+		if (++cached_ntc >= cnt)
+			cached_ntc = 0;
+	}
 	rx_ring->next_to_clean = ntc;
 	/* return up to cleaned_count buffers to hardware */
 	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
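
For readers who want the control-flow change in isolation: the patch stops doing page bookkeeping inline in the hot Rx loop and instead stores a per-buffer verdict in rx_buf->act, then walks the processed ring slice once afterwards. Below is a minimal, self-contained user-space sketch of that deferred-verdict pattern, not the driver code; the demo_buf struct, the ACT_* enum and the plain == comparisons are simplifications of ice_rx_buf, the ICE_XDP_* bit flags and ice_rx_buf_adjust_pg_offset().

/* sketch only: illustrates the deferred per-buffer verdict, not ice_txrx.c */
#include <stdio.h>

enum buf_act { ACT_PASS, ACT_CONSUMED, ACT_TX, ACT_REDIR };

struct demo_buf {
	enum buf_act act;	/* verdict recorded in the hot loop */
	int pagecnt_bias;	/* stand-in for the page refcount bias */
	int page_offset;	/* stand-in for the half-page flip offset */
};

#define RING_SIZE 4
#define FRAME_SZ 2048

int main(void)
{
	struct demo_buf ring[RING_SIZE] = { { 0 } };
	unsigned int cnt = RING_SIZE, ntc, cached_ntc = 0;

	/* hot-loop stand-in: only the verdict is stored, no page bookkeeping */
	ring[0].act = ACT_CONSUMED;	/* e.g. XDP_DROP */
	ring[1].act = ACT_PASS;		/* frame handed to the stack as an skb */
	ring[2].act = ACT_REDIR;	/* frame redirected elsewhere */
	ntc = 3;

	/* deferred pass, mirroring the new while (cached_ntc != ntc) loop */
	while (cached_ntc != ntc) {
		struct demo_buf *buf = &ring[cached_ntc];

		if (buf->act == ACT_TX || buf->act == ACT_REDIR)
			buf->page_offset += FRAME_SZ;	/* page still in flight */
		else if (buf->act == ACT_CONSUMED)
			buf->pagecnt_bias++;		/* give the page back */
		else if (buf->act == ACT_PASS)
			buf->page_offset += FRAME_SZ;	/* consumed by the skb */

		printf("buf %u: act=%d offset=%d bias=%d\n",
		       cached_ntc, buf->act, buf->page_offset, buf->pagecnt_bias);

		if (++cached_ntc >= cnt)
			cached_ntc = 0;
	}

	return 0;
}

Splitting the verdict from the bookkeeping is what lets the patch drop the per-descriptor ice_put_rx_buf() calls from the hot path and recycle every processed buffer in a single place at the end of ice_clean_rx_irq().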