Re: [PATCH v2 4/4] net: ntb_netdev: Support ethtool channels for multi-queue
From: Koichiro Den
Date: Thu Mar 05 2026 - 08:47:45 EST
On Wed, Mar 04, 2026 at 05:19:30PM -0800, Jakub Kicinski wrote:
> On Sat, 28 Feb 2026 23:55:38 +0900 Koichiro Den wrote:
> > +static const struct ntb_queue_handlers ntb_netdev_handlers;
>
> can we move some code around to avoid this forward declaration?
Sure. I'll do so by placing the ntb_netdev_handlers definition (it's a
struct, not a function) right after ntb_netdev_tx_handler().
>
> > +static int ntb_set_channels(struct net_device *ndev,
> > + struct ethtool_channels *channels)
> > +{
> > + struct ntb_netdev *dev = netdev_priv(ndev);
> > + unsigned int new = channels->combined_count;
> > + unsigned int old = dev->num_queues;
> > + bool running = netif_running(ndev);
> > + struct ntb_netdev_queue *queue;
> > + unsigned int q, created;
> > + int rc = 0;
> > +
> > + if (channels->rx_count || channels->tx_count || channels->other_count)
> > + return -EINVAL;
>
> Pretty sure core will protect from this since your get will return 0
> for corresponding max values
>
> > + if (!new || new > ndev->num_tx_queues)
> > + return -ERANGE;
>
> same, core should protect against this
Right, these are redundant as ethnl_set_channels() does the checks. I'll drop
them.
>
> > + if (new == old)
> > + return 0;
> > +
> > + if (new < old) {
> > + if (running)
> > + for (q = new; q < old; q++)
> > + netif_stop_subqueue(ndev, q);
> > +
> > + rc = netif_set_real_num_queues(ndev, new, new);
> > + if (rc)
> > + goto out_restore;
> > +
> > + /* Publish new queue count before invalidating QP pointers */
> > + dev->num_queues = new;
> > +
> > + for (q = new; q < old; q++) {
> > + queue = &dev->queues[q];
> > +
> > + if (running) {
> > + ntb_transport_link_down(queue->qp);
> > + ntb_netdev_queue_rx_drain(queue);
> > + timer_delete_sync(&queue->tx_timer);
> > + }
> > +
> > + ntb_transport_free_queue(queue->qp);
> > + queue->qp = NULL;
> > + }
> > +
> > + goto out_restore;
> > + }
> > +
> > + created = old;
> > + for (q = old; q < new; q++) {
> > + queue = &dev->queues[q];
> > +
> > + queue->ntdev = dev;
> > + queue->qid = q;
> > + queue->qp = ntb_transport_create_queue(queue, dev->client_dev,
> > + &ntb_netdev_handlers);
> > + if (!queue->qp) {
> > + rc = -ENOSPC;
> > + goto err_new;
> > + }
> > + created++;
> > +
> > + if (!running)
> > + continue;
> > +
> > + timer_setup(&queue->tx_timer, ntb_netdev_tx_timer, 0);
> > +
> > + rc = ntb_netdev_queue_rx_fill(ndev, queue);
> > + if (rc)
> > + goto err_new;
> > +
> > + /*
> > + * Carrier may already be on due to other QPs. Keep the new
> > + * subqueue stopped until we get a Link Up event for this QP.
> > + */
> > + netif_stop_subqueue(ndev, q);
> > + }
> > +
> > + rc = netif_set_real_num_queues(ndev, new, new);
> > + if (rc)
> > + goto err_new;
> > +
> > + dev->num_queues = new;
> > +
> > + if (running)
> > + for (q = old; q < new; q++)
> > + ntb_transport_link_up(dev->queues[q].qp);
> > +
> > + return 0;
> > +
> > +err_new:
> > + if (running) {
> > + unsigned int rollback = created;
> > +
> > + while (rollback-- > old) {
> > + queue = &dev->queues[rollback];
> > + ntb_transport_link_down(queue->qp);
> > + ntb_netdev_queue_rx_drain(queue);
> > + timer_delete_sync(&queue->tx_timer);
> > + }
> > + }
> > +
> > + while (created-- > old) {
> > + queue = &dev->queues[created];
> > + ntb_transport_free_queue(queue->qp);
> > + queue->qp = NULL;
> > + }
> > +
> > +out_restore:
> > + if (running) {
> > + ntb_netdev_sync_subqueues(dev);
> > + ntb_netdev_update_carrier(dev);
> > + }
> > + return rc;
> > +}
>
> The logic LGTM, but the function is a little long.
> Would it be possible to split it up?
Yes, I'll split it up in v3.
While doing so, I also noticed that ntb_netdev_sync_subqueues() does more than
necessary. In particular, re-syncing all remaining subqueues based solely on
link state is not the right granularity for set_channels(), and can also
unnecessarily override subqueue stop state unrelated to resizing.
So in v3, I'd like to drop ntb_netdev_sync_subqueues() entirely and make the
rollback behavior more explicit and compact.
Thanks for the review,
Koichiro