Re: [PATCH net-next] xsk: Avoid starving xsk at the end of the list

From: 黄杰
Date: Sun Oct 15 2023 - 23:10:43 EST


On Fri, Oct 13, 2023 at 14:33, Albert Huang <huangjie.albert@xxxxxxxxxxxxx> wrote:
>
> In the previous implementation, when multiple xsk sockets were
> associated with a single xsk_buff_pool, the sockets at the front of
> the xsk_tx_list could keep supplying tx descriptors while the xsk
> sockets at the back of the list were starved. This could result in
> issues such as the inability to transmit packets, increased latency,
> and jitter. To address this problem, introduce a new field called
> tx_budget_cache, which limits each xsk socket to transmitting at most
> MAX_XSK_TX_BUDGET tx descriptors. This budget ensures that the
> subsequent xsk sockets on the list get an equitable opportunity to
> send tx descriptors. The value of MAX_XSK_TX_BUDGET is set to 16 for
> now.
>
> Signed-off-by: Albert Huang <huangjie.albert@xxxxxxxxxxxxx>
> ---
> include/net/xdp_sock.h | 6 ++++++
> net/xdp/xsk.c | 17 +++++++++++++++++
> 2 files changed, 23 insertions(+)
>
> diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
> index 69b472604b86..f617ff54e38c 100644
> --- a/include/net/xdp_sock.h
> +++ b/include/net/xdp_sock.h
> @@ -44,6 +44,7 @@ struct xsk_map {
> struct xdp_sock __rcu *xsk_map[];
> };
>
> +#define MAX_XSK_TX_BUDGET 16
> struct xdp_sock {
> /* struct sock must be the first member of struct xdp_sock */
> struct sock sk;
> @@ -63,6 +64,11 @@ struct xdp_sock {
>
> struct xsk_queue *tx ____cacheline_aligned_in_smp;
> struct list_head tx_list;
> + /* Record the actual number of times xsk has transmitted a tx
> + * descriptor, with a maximum limit not exceeding MAX_XSK_TX_BUDGET
> + */
> + u32 tx_budget_cache;
> +
> /* Protects generic receive. */
> spinlock_t rx_lock;
>
> diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
> index f5e96e0d6e01..bf964456e9b1 100644
> --- a/net/xdp/xsk.c
> +++ b/net/xdp/xsk.c
> @@ -413,16 +413,25 @@ EXPORT_SYMBOL(xsk_tx_release);
>
> bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
> {
> + u32 xsk_full_count = 0;
> struct xdp_sock *xs;
>
> rcu_read_lock();
> +again:
> list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
> + if (xs->tx_budget_cache >= MAX_XSK_TX_BUDGET) {
> + xsk_full_count++;
> + continue;
> + }
> +
> if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
> if (xskq_has_descs(xs->tx))
> xskq_cons_release(xs->tx);
> continue;
> }
>
> + xs->tx_budget_cache++;
> +
> /* This is the backpressure mechanism for the Tx path.
> * Reserve space in the completion queue and only proceed
> * if there is space in it. This avoids having to implement
> @@ -436,6 +445,13 @@ bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
> return true;
> }
>
> + if (unlikely(xsk_full_count > 0)) {
> + list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
> + xs->tx_budget_cache = 0;
> + }
> + goto again;
> + }

xsk_full_count should be set back to 0 here before the goto; otherwise, once all
budgets have been cleared and no socket has descriptors left to send, the stale
non-zero count keeps jumping back to "again" instead of letting the function
return false. I will resend another patch to fix this.
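
For reference, a minimal sketch of how that retry block could look with the
reset added (untested, only illustrating the point; everything except the
xsk_full_count reset is unchanged from the patch above):

        if (unlikely(xsk_full_count > 0)) {
                /* Every socket that still had descriptors hit its budget:
                 * clear the per-socket budgets and scan the list again.
                 */
                list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                        xs->tx_budget_cache = 0;
                }
                /* Also clear the counter itself, otherwise a stale non-zero
                 * value keeps sending us back to "again" even when no socket
                 * is budget-limited any more.
                 */
                xsk_full_count = 0;
                goto again;
        }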

> +
> out:
> rcu_read_unlock();
> return false;
> @@ -1230,6 +1246,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
> xs->zc = xs->umem->zc;
> xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
> xs->queue_id = qid;
> + xs->tx_budget_cache = 0;
> xp_add_xsk(xs->pool, xs);
>
> out_unlock:
> --
> 2.20.1
>