Commit 6208799

Heng Qi authored and davem330 committed
virtio-net: support rx netdim
By comparing the traffic information in the complete napi processes, let the virtio-net driver automatically adjust the coalescing moderation parameters of each receive queue.

Signed-off-by: Heng Qi <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 1db43c0 commit 6208799
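For context, the patch builds on the kernel's DIM library (DIMLIB, include/linux/dim.h): the NAPI completion path feeds a traffic sample to net_dim(), and when the algorithm picks a new moderation profile it schedules a work item that pushes the chosen parameters to the device. The snippet below is a condensed, hedged sketch of that flow rather than the driver code itself; struct my_rxq, my_rxq_dim_update(), my_rx_dim_work() and my_apply_moderation() are placeholder names, not part of the patch.

#include <linux/dim.h>
#include <linux/workqueue.h>

/* Hypothetical per-receive-queue state, standing in for the driver's
 * struct receive_queue. */
struct my_rxq {
        struct dim dim;        /* DIM state machine for this queue */
        u16 irq_calls;         /* how many times the rx interrupt fired */
        u64 rx_packets;        /* packets received so far */
        u64 rx_bytes;          /* bytes received so far */
};

/* Placeholder for the device-specific update (virtio-net sends a
 * per-queue coalescing command over the control virtqueue). */
static void my_apply_moderation(struct dim *dim, u32 usecs, u32 pkts);

/* Called from the NAPI completion path: hand DIM one traffic sample.
 * If DIM decides the moderation profile should change, it schedules
 * dim.work (the role virtnet_rx_dim_work plays in this patch). */
static void my_rxq_dim_update(struct my_rxq *rq)
{
        struct dim_sample sample = {};

        dim_update_sample(rq->irq_calls, rq->rx_packets, rq->rx_bytes, &sample);
        net_dim(&rq->dim, sample);
}

/* Work item: look up the profile DIM selected and program the device. */
static void my_rx_dim_work(struct work_struct *work)
{
        struct dim *dim = container_of(work, struct dim, work);
        struct dim_cq_moder moder;

        moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
        my_apply_moderation(dim, moder.usec, moder.pkts);
        dim->state = DIM_START_MEASURE;
}

As the diff below shows, the driver initializes each queue with INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work) and DIM_CQ_PERIOD_MODE_START_FROM_EQE, and advertises ETHTOOL_COALESCE_USE_ADAPTIVE_RX, so the feature is toggled from userspace with something like ethtool -C <dev> adaptive-rx on.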

2 files changed (+163, −14 lines)


drivers/net/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -434,6 +434,7 @@ config VIRTIO_NET
         tristate "Virtio network driver"
         depends on VIRTIO
         select NET_FAILOVER
+        select DIMLIB
         help
           This is the virtual network driver for virtio. It can be used with
           QEMU based VMMs (like KVM or Xen). Say Y or M.

drivers/net/virtio_net.c

Lines changed: 162 additions & 14 deletions
@@ -19,6 +19,7 @@
 #include <linux/average.h>
 #include <linux/filter.h>
 #include <linux/kernel.h>
+#include <linux/dim.h>
 #include <net/route.h>
 #include <net/xdp.h>
 #include <net/net_failover.h>
@@ -172,6 +173,17 @@ struct receive_queue {

        struct virtnet_rq_stats stats;

+       /* The number of rx notifications */
+       u16 calls;
+
+       /* Is dynamic interrupt moderation enabled? */
+       bool dim_enabled;
+
+       /* Dynamic Interrupt Moderation */
+       struct dim dim;
+
+       u32 packets_in_napi;
+
        struct virtnet_interrupt_coalesce intr_coal;

        /* Chain pages by the private ptr. */
@@ -305,6 +317,9 @@ struct virtnet_info {
        u8 duplex;
        u32 speed;

+       /* Is rx dynamic interrupt moderation enabled? */
+       bool rx_dim_enabled;
+
        /* Interrupt coalescing settings */
        struct virtnet_interrupt_coalesce intr_coal_tx;
        struct virtnet_interrupt_coalesce intr_coal_rx;
@@ -2001,6 +2016,7 @@ static void skb_recv_done(struct virtqueue *rvq)
        struct virtnet_info *vi = rvq->vdev->priv;
        struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

+       rq->calls++;
        virtqueue_napi_schedule(&rq->napi, rvq);
 }

@@ -2141,6 +2157,24 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
        }
 }

+static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq)
+{
+       struct dim_sample cur_sample = {};
+
+       if (!rq->packets_in_napi)
+               return;
+
+       u64_stats_update_begin(&rq->stats.syncp);
+       dim_update_sample(rq->calls,
+                         u64_stats_read(&rq->stats.packets),
+                         u64_stats_read(&rq->stats.bytes),
+                         &cur_sample);
+       u64_stats_update_end(&rq->stats.syncp);
+
+       net_dim(&rq->dim, cur_sample);
+       rq->packets_in_napi = 0;
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
        struct receive_queue *rq =
@@ -2149,17 +2183,22 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        struct send_queue *sq;
        unsigned int received;
        unsigned int xdp_xmit = 0;
+       bool napi_complete;

        virtnet_poll_cleantx(rq);

        received = virtnet_receive(rq, budget, &xdp_xmit);
+       rq->packets_in_napi += received;

        if (xdp_xmit & VIRTIO_XDP_REDIR)
                xdp_do_flush();

        /* Out of packets? */
-       if (received < budget)
-               virtqueue_napi_complete(napi, rq->vq, received);
+       if (received < budget) {
+               napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+               if (napi_complete && rq->dim_enabled)
+                       virtnet_rx_dim_update(vi, rq);
+       }

        if (xdp_xmit & VIRTIO_XDP_TX) {
                sq = virtnet_xdp_get_sq(vi);
@@ -2230,8 +2269,11 @@ static int virtnet_open(struct net_device *dev)
        disable_delayed_refill(vi);
        cancel_delayed_work_sync(&vi->refill);

-       for (i--; i >= 0; i--)
+       for (i--; i >= 0; i--) {
                virtnet_disable_queue_pair(vi, i);
+               cancel_work_sync(&vi->rq[i].dim.work);
+       }
+
        return err;
 }

@@ -2393,8 +2435,10 @@ static int virtnet_rx_resize(struct virtnet_info *vi,

        qindex = rq - vi->rq;

-       if (running)
+       if (running) {
                napi_disable(&rq->napi);
+               cancel_work_sync(&rq->dim.work);
+       }

        err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
        if (err)
@@ -2641,8 +2685,10 @@ static int virtnet_close(struct net_device *dev)
        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);

-       for (i = 0; i < vi->max_queue_pairs; i++)
+       for (i = 0; i < vi->max_queue_pairs; i++) {
                virtnet_disable_queue_pair(vi, i);
+               cancel_work_sync(&vi->rq[i].dim.work);
+       }

        return 0;
 }
@@ -2914,9 +2960,6 @@ static void virtnet_get_ringparam(struct net_device *dev,
        ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
 }

-static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
-                                         u16 vqn, u32 max_usecs, u32 max_packets);
-
 static int virtnet_set_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *ring,
                                  struct kernel_ethtool_ringparam *kernel_ring,
@@ -3327,7 +3370,6 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
                                          &sgs_tx))
                return -EINVAL;

-       /* Save parameters */
        vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
        vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
        for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3341,9 +3383,34 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
 static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
                                           struct ethtool_coalesce *ec)
 {
+       bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
        struct scatterlist sgs_rx;
        int i;

+       if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+               return -EOPNOTSUPP;
+
+       if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs ||
+                              ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
+               return -EINVAL;
+
+       if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
+               vi->rx_dim_enabled = true;
+               for (i = 0; i < vi->max_queue_pairs; i++)
+                       vi->rq[i].dim_enabled = true;
+               return 0;
+       }
+
+       if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
+               vi->rx_dim_enabled = false;
+               for (i = 0; i < vi->max_queue_pairs; i++)
+                       vi->rq[i].dim_enabled = false;
+       }
+
+       /* Since the per-queue coalescing params can be set,
+        * we need apply the global new params even if they
+        * are not updated.
+        */
        vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
        vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
        sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
@@ -3353,7 +3420,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
                                          &sgs_rx))
                return -EINVAL;

-       /* Save parameters */
        vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
        vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
        for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -3380,18 +3446,52 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
        return 0;
 }

-static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
-                                          struct ethtool_coalesce *ec,
-                                          u16 queue)
+static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
+                                             struct ethtool_coalesce *ec,
+                                             u16 queue)
 {
+       bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
+       bool cur_rx_dim = vi->rq[queue].dim_enabled;
+       u32 max_usecs, max_packets;
        int err;

+       max_usecs = vi->rq[queue].intr_coal.max_usecs;
+       max_packets = vi->rq[queue].intr_coal.max_packets;
+
+       if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
+                              ec->rx_max_coalesced_frames != max_packets))
+               return -EINVAL;
+
+       if (rx_ctrl_dim_on && !cur_rx_dim) {
+               vi->rq[queue].dim_enabled = true;
+               return 0;
+       }
+
+       if (!rx_ctrl_dim_on && cur_rx_dim)
+               vi->rq[queue].dim_enabled = false;
+
+       /* If no params are updated, userspace ethtool will
+        * reject the modification.
+        */
        err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
                                               ec->rx_coalesce_usecs,
                                               ec->rx_max_coalesced_frames);
        if (err)
                return err;

+       return 0;
+}
+
+static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
+                                          struct ethtool_coalesce *ec,
+                                          u16 queue)
+{
+       int err;
+
+       err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue);
+       if (err)
+               return err;
+
        err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue,
                                               ec->tx_coalesce_usecs,
                                               ec->tx_max_coalesced_frames);
@@ -3401,6 +3501,49 @@ static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
        return 0;
 }

+static void virtnet_rx_dim_work(struct work_struct *work)
+{
+       struct dim *dim = container_of(work, struct dim, work);
+       struct receive_queue *rq = container_of(dim,
+                       struct receive_queue, dim);
+       struct virtnet_info *vi = rq->vq->vdev->priv;
+       struct net_device *dev = vi->dev;
+       struct dim_cq_moder update_moder;
+       int i, qnum, err;
+
+       if (!rtnl_trylock())
+               return;
+
+       /* Each rxq's work is queued by "net_dim()->schedule_work()"
+        * in response to NAPI traffic changes. Note that dim->profile_ix
+        * for each rxq is updated prior to the queuing action.
+        * So we only need to traverse and update profiles for all rxqs
+        * in the work which is holding rtnl_lock.
+        */
+       for (i = 0; i < vi->curr_queue_pairs; i++) {
+               rq = &vi->rq[i];
+               dim = &rq->dim;
+               qnum = rq - vi->rq;
+
+               if (!rq->dim_enabled)
+                       continue;
+
+               update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+               if (update_moder.usec != rq->intr_coal.max_usecs ||
+                   update_moder.pkts != rq->intr_coal.max_packets) {
+                       err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+                                                              update_moder.usec,
+                                                              update_moder.pkts);
+                       if (err)
+                               pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+                                        dev->name, qnum);
+                       dim->state = DIM_START_MEASURE;
+               }
+       }
+
+       rtnl_unlock();
+}
+
 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
 {
        /* usecs coalescing is supported only if VIRTIO_NET_F_NOTF_COAL
@@ -3482,6 +3625,7 @@ static int virtnet_get_coalesce(struct net_device *dev,
                ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
                ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
                ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
+               ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled;
        } else {
                ec->rx_max_coalesced_frames = 1;

@@ -3539,6 +3683,7 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
                ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
                ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
                ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
+               ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
        } else {
                ec->rx_max_coalesced_frames = 1;

@@ -3664,7 +3809,7 @@ static int virtnet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)

 static const struct ethtool_ops virtnet_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
-               ETHTOOL_COALESCE_USECS,
+               ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = virtnet_get_ringparam,
@@ -4254,6 +4399,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
                                  virtnet_poll_tx,
                                  napi_tx ? napi_weight : 0);

+               INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
+               vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+
                sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
                ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
                sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
