[PATCH 4.19 27/74] virtio_net: Don't call free_old_xmit_skbs for xdp_frames

From: Greg Kroah-Hartman
Date: Mon Feb 04 2019 - 05:48:40 EST


4.19-stable review patch. If anyone has any objections, please let me know.

------------------

From: Toshiaki Makita <makita.toshiaki@xxxxxxxxxxxxx>

[ Upstream commit 534da5e856334fb54cb0272a9fb3afec28ea3aed ]

When napi_tx is enabled, virtnet_poll_cleantx() called
free_old_xmit_skbs() even for xdp send queues.
This is bogus since those queues hold xdp_frames, not sk_buffs, so it
mangled the device tx bytes counters (skb->len is a meaningless value
there) and even triggered an oops due to a general protection fault
when freeing them.
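
For context, a simplified sketch of the cleanup loop as it looks around
4.19 (trimmed here; the per-queue stats plumbing is omitted). Every
completed buffer is treated as an sk_buff, which is exactly what goes
wrong when the queue actually holds xdp_frames:

static void free_old_xmit_skbs(struct send_queue *sq)
{
	unsigned int packets = 0, bytes = 0;
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		bytes += skb->len;	  /* garbage for an xdp_frame */
		packets++;
		dev_consume_skb_any(skb); /* can fault on an xdp_frame */
	}

	/* ... u64_stats update of sq->stats elided ... */
}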

Since xdp send queues do not acquire locks, old xdp_frames should be
freed only in virtnet_xdp_xmit(), so just skip free_old_xmit_skbs() for
xdp send queues.
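
As a worked example of the queue layout this relies on (the numbers are
made up for illustration): with curr_queue_pairs = 8 and
xdp_queue_pairs = 2, send queues 0-5 carry sk_buffs while queues 6 and 7
carry raw XDP buffers. A standalone userspace sketch of the predicate
the patch adds as is_xdp_raw_buffer_queue():

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the is_xdp_raw_buffer_queue() logic: the last xdp_queue_pairs
 * of the curr_queue_pairs active queues are reserved for XDP. */
static bool is_xdp_queue(int q, int curr_queue_pairs, int xdp_queue_pairs)
{
	return q >= curr_queue_pairs - xdp_queue_pairs &&
	       q < curr_queue_pairs;
}

int main(void)
{
	int q;

	for (q = 0; q < 8; q++)
		printf("queue %d: %s\n", q,
		       is_xdp_queue(q, 8, 2) ? "xdp" : "skb");
	return 0;
}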

Similarly, virtnet_poll_tx() called free_old_xmit_skbs(). This NAPI
handler is invoked even when start_xmit() has never been called,
because the callback (cb) for tx is enabled by default. Once the
handler runs, it re-enables the cb, so the handler gets called again.
We don't need this handler for XDP, so neither enable the cb nor call
free_old_xmit_skbs().
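
The cycle described above, sketched as a comment (a summary of the
paragraph, not code from the driver):

/*
 *   tx cb enabled --> device interrupt --> napi_schedule()
 *         ^                                      |
 *         |                                      v
 *   handler re-enables the cb <---------- virtnet_poll_tx()
 *
 * For XDP queues the patch breaks the cycle by completing NAPI with
 * napi_complete_done(napi, 0) and never re-arming the cb.
 */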

Also, we need to disable tx NAPI when disabling XDP, so
virtnet_poll_tx() can safely access curr_queue_pairs and
xdp_queue_pairs, which are not atomically updated while disabling XDP.
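
A hypothetical interleaving showing the window this closes (the exact
ordering is illustrative; the field names are from the driver):

/*
 *   CPU0: virtnet_xdp_set()          CPU1: virtnet_poll_tx()
 *   ------------------------         -----------------------
 *   updates curr_queue_pairs
 *                                    is_xdp_raw_buffer_queue() sees the
 *                                    new curr_queue_pairs but the old
 *                                    xdp_queue_pairs and may misclassify
 *                                    a queue, freeing its buffers the
 *                                    wrong way
 *   updates xdp_queue_pairs
 *
 * Disabling tx NAPI before touching either field closes the window.
 */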

Fixes: b92f1e6751a6 ("virtio-net: transmit napi")
Fixes: 7b0411ef4aa6 ("virtio-net: clean tx descriptors from rx napi")
Signed-off-by: Toshiaki Makita <makita.toshiaki@xxxxxxxxxxxxx>
Acked-by: Jason Wang <jasowang@xxxxxxxxxx>
Acked-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
Signed-off-by: David S. Miller <davem@xxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
drivers/net/virtio_net.c | 49 +++++++++++++++++++++++++++++++----------------
1 file changed, 33 insertions(+), 16 deletions(-)

--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1357,6 +1357,16 @@ static void free_old_xmit_skbs(struct se
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+		return false;
+	else if (q < vi->curr_queue_pairs)
+		return true;
+	else
+		return false;
+}
+
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1364,7 +1374,7 @@ static void virtnet_poll_cleantx(struct
 	struct send_queue *sq = &vi->sq[index];
 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
 
-	if (!sq->napi.weight)
+	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
 		return;
 
 	if (__netif_tx_trylock(txq)) {
@@ -1441,8 +1451,16 @@ static int virtnet_poll_tx(struct napi_s
 {
 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
 	struct virtnet_info *vi = sq->vq->vdev->priv;
-	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+	unsigned int index = vq2txq(sq->vq);
+	struct netdev_queue *txq;
 
+	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+		/* We don't need to enable cb for XDP */
+		napi_complete_done(napi, 0);
+		return 0;
+	}
+
+	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
 	free_old_xmit_skbs(sq);
 	__netif_tx_unlock(txq);
@@ -2352,9 +2370,12 @@ static int virtnet_xdp_set(struct net_de
 	}
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	if (netif_running(dev))
-		for (i = 0; i < vi->max_queue_pairs; i++)
+	if (netif_running(dev)) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			virtnet_napi_tx_disable(&vi->sq[i].napi);
+		}
+	}
 
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2373,16 +2394,22 @@ static int virtnet_xdp_set(struct net_de
 		}
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		if (netif_running(dev))
+		if (netif_running(dev)) {
 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 
 	return 0;
 
 err:
 	if (netif_running(dev)) {
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 	if (prog)
 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
@@ -2539,16 +2566,6 @@ static void free_receive_page_frags(stru
 			put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-		return false;
-	else if (q < vi->curr_queue_pairs)
-		return true;
-	else
-		return false;
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;