RE: [PATCH v2 1/1] Drop Tx network packet when Tx TmFIFO is full

From: Liming Sun
Date: Fri Jan 05 2024 - 12:40:11 EST




> -----Original Message-----
> From: Ilpo Järvinen <ilpo.jarvinen@xxxxxxxxxxxxxxx>
> Sent: Thursday, January 4, 2024 12:39 PM
> To: Liming Sun <limings@xxxxxxxxxx>
> Cc: Vadim Pasternak <vadimp@xxxxxxxxxx>; David Thompson
> <davthompson@xxxxxxxxxx>; Hans de Goede <hdegoede@xxxxxxxxxx>;
> Mark Gross <markgross@xxxxxxxxxx>; Dan Carpenter
> <dan.carpenter@xxxxxxxxxx>; platform-driver-x86@xxxxxxxxxxxxxxx; LKML
> <linux-kernel@xxxxxxxxxxxxxxx>
> Subject: Re: [PATCH v2 1/1] Drop Tx network packet when Tx TmFIFO is full
>
> On Thu, 4 Jan 2024, Liming Sun wrote:
>
> > Starting from the Linux 5.16 kernel, a Tx timeout mechanism was
> > added to the virtio_net driver, which prints a "Tx timeout" message
> > when a packet is stuck in the Tx queue for too long. This can happen
> > when the external host driver is stuck or stopped and fails to read
> > the FIFO.
> >
> > Below is an example of the reported message:
> >
> > "[494105.316739] virtio_net virtio1 tmfifo_net0: TX timeout on
> > queue: 0, sq: output.0, vq: 0x1, name: output.0, usecs since
> > last trans: 3079892256".
> >
> > To avoid such "Tx timeout" messages, this commit adds a timeout
> > mechanism that drops and releases the pending Tx packet if it cannot
> > be transmitted within two seconds because the Tx FIFO is full.
> >
> > This commit also handles the special case where the packet has been
> > partially written into the Tx FIFO. In that case, the packet is
> > discarded and the remaining length is stored in vring->rem_padding,
> > so zero padding can be sent out once Tx space is available to keep
> > the packet framing intact. The padded packet will be dropped on the
> > receiving side.
> >
> > Signed-off-by: Liming Sun <limings@xxxxxxxxxx>
>
> This doesn't really explain how it helps (other than avoiding the
> message, which sounds like just hiding the issue). That is, how does this
> help Tx to resume? Or does Tx resume at all? There's nothing to indicate
> either way.

As the commit message mentions, the expired packet is discarded and its
buffer is released (see the added call to mlxbf_tmfifo_release_pkt()).
Tx resumes automatically once FIFO space becomes available again, for
example when the external host driver starts to drain the TMFIFO, so no
extra resume logic is needed.
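
Just to illustrate the intended behavior outside of the driver, here is a
rough standalone sketch of the drop-and-resume flow. It is not the patch
code itself: it uses plain millisecond timestamps and simplified fields
(tx_deadline, rem_len, rem_padding) as stand-ins for jiffies and the real
mlxbf_tmfifo_vring state.

#include <stdint.h>
#include <stdio.h>

struct tx_state {
	uint64_t tx_deadline;	/* 0 means no Tx stall is being timed */
	int rem_len;		/* bytes of the pending packet still unsent */
	int rem_padding;	/* zero bytes still owed to the FIFO */
};

#define TX_TIMEOUT_MS 2000

/* Called while the FIFO is full and a Tx packet is pending. */
static void check_tx_timeout(struct tx_state *s, uint64_t now_ms)
{
	if (!s->tx_deadline) {			/* first stall: arm the timer */
		s->tx_deadline = now_ms + TX_TIMEOUT_MS;
		return;
	}
	if (now_ms < s->tx_deadline)		/* not expired yet */
		return;

	/* Expired: drop the packet and remember how much padding to emit. */
	s->rem_padding = (s->rem_len + 7) & ~7;	/* round up to 8-byte words */
	s->rem_len = 0;
	printf("packet dropped, %d padding bytes owed\n", s->rem_padding);
}

/* Called once FIFO space becomes available again. */
static void tx_resume(struct tx_state *s, int avail_words)
{
	s->tx_deadline = 0;			/* disarm the timer */
	while (s->rem_padding >= 8 && avail_words-- > 0)
		s->rem_padding -= 8;		/* writeq(0, ...) in the driver */
	printf("padding left: %d, ready for new packets: %s\n",
	       s->rem_padding, s->rem_padding ? "no" : "yes");
}

int main(void)
{
	struct tx_state s = { .rem_len = 20 };

	check_tx_timeout(&s, 1000);	/* FIFO full: arm the 2s window */
	check_tx_timeout(&s, 2500);	/* still within the window */
	check_tx_timeout(&s, 3100);	/* expired: drop, owe 24 padding bytes */
	tx_resume(&s, 4);		/* host drains FIFO, padding flushed */
	return 0;
}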

>
> --
> i.
>
>
> > ---
> > drivers/platform/mellanox/mlxbf-tmfifo.c | 67 ++++++++++++++++++++++++
> > 1 file changed, 67 insertions(+)
> >
> > diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
> > index 5c683b4eaf10..f39b7b9d2bfe 100644
> > --- a/drivers/platform/mellanox/mlxbf-tmfifo.c
> > +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
> > @@ -47,6 +47,9 @@
> > /* Message with data needs at least two words (for header & data). */
> > #define MLXBF_TMFIFO_DATA_MIN_WORDS 2
> >
> > +/* Tx timeout in milliseconds. */
> > +#define TMFIFO_TX_TIMEOUT 2000
> > +
> > /* ACPI UID for BlueField-3. */
> > #define TMFIFO_BF3_UID 1
> >
> > @@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
> > * @drop_desc: dummy desc for packet dropping
> > * @cur_len: processed length of the current descriptor
> > * @rem_len: remaining length of the pending packet
> > + * @rem_padding: remaining bytes to send as paddings
> > * @pkt_len: total length of the pending packet
> > * @next_avail: next avail descriptor id
> > * @num: vring size (number of descriptors)
> > * @align: vring alignment size
> > * @index: vring index
> > * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
> > + * @tx_timeout: expire time of last tx packet
> > * @fifo: pointer to the tmfifo structure
> > */
> > struct mlxbf_tmfifo_vring {
> > @@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
> > struct vring_desc drop_desc;
> > int cur_len;
> > int rem_len;
> > + int rem_padding;
> > u32 pkt_len;
> > u16 next_avail;
> > int num;
> > int align;
> > int index;
> > int vdev_id;
> > + unsigned long tx_timeout;
> > struct mlxbf_tmfifo *fifo;
> > };
> >
> > @@ -819,6 +826,50 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
> > return true;
> > }
> >
> > +static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
> > +{
> > + unsigned long flags;
> > +
> > + /* Only handle Tx timeout for network vdev. */
> > + if (vring->vdev_id != VIRTIO_ID_NET)
> > + return;
> > +
> > + /* Initialize the timeout or return if not expired. */
> > + if (!vring->tx_timeout) {
> > + /* Initialize the timeout. */
> > + vring->tx_timeout = jiffies +
> > + msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
> > + return;
> > + } else if (time_before(jiffies, vring->tx_timeout)) {
> > + /* Return if not timeout yet. */
> > + return;
> > + }
> > +
> > + /*
> > + * Drop the packet after timeout. The outstanding packet is
> > + * released and the remaining bytes will be sent with padding byte 0x00
> > + * as a recovery. On the peer(host) side, the padding bytes 0x00 will be
> > + * either dropped directly, or appended into existing outstanding packet
> > + * thus dropped as corrupted network packet.
> > + */
> > + vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
> > + mlxbf_tmfifo_release_pkt(vring);
> > + vring->cur_len = 0;
> > + vring->rem_len = 0;
> > + vring->fifo->vring[0] = NULL;
> > +
> > + /*
> > + * Make sure the load/store are in order before
> > + * returning back to virtio.
> > + */
> > + virtio_mb(false);
> > +
> > + /* Notify upper layer. */
> > + spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
> > + vring_interrupt(0, vring->vq);
> > + spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
> > +}
> > +
> > /* Rx & Tx processing of a queue. */
> > static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
> > {
> > @@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
> > return;
> >
> > do {
> > +retry:
> > /* Get available FIFO space. */
> > if (avail == 0) {
> > if (is_rx)
> > @@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
> > break;
> > }
> >
> > + /* Insert paddings for discarded Tx packet. */
> > + if (!is_rx) {
> > + vring->tx_timeout = 0;
> > + while (vring->rem_padding >= sizeof(u64)) {
> > + writeq(0, vring->fifo->tx.data);
> > + vring->rem_padding -= sizeof(u64);
> > + if (--avail == 0)
> > + goto retry;
> > + }
> > + }
> > +
> > /* Console output always comes from the Tx buffer. */
> > if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
> > mlxbf_tmfifo_console_tx(fifo, avail);
> > @@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
> > /* Handle one descriptor. */
> > more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
> > } while (more);
> > +
> > + /* Check Tx timeout. */
> > + if (avail <= 0 && !is_rx)
> > + mlxbf_tmfifo_check_tx_timeout(vring);
> > }
> >
> > /* Handle Rx or Tx queues. */
> >