RE: [PATCH v5 12/14] fm10k: Report PCIe link properties with pcie_print_link_status()

From: Keller, Jacob E
Date: Mon Apr 02 2018 - 11:56:17 EST


> -----Original Message-----
> From: Bjorn Helgaas [mailto:helgaas@xxxxxxxxxx]
> Sent: Friday, March 30, 2018 2:06 PM
> To: Tal Gilboa <talgi@xxxxxxxxxxxx>
> Cc: Tariq Toukan <tariqt@xxxxxxxxxxxx>; Keller, Jacob E
> <jacob.e.keller@xxxxxxxxx>; Ariel Elior <ariel.elior@xxxxxxxxxx>; Ganesh
> Goudar <ganeshgr@xxxxxxxxxxx>; Kirsher, Jeffrey T
> <jeffrey.t.kirsher@xxxxxxxxx>; everest-linux-l2@xxxxxxxxxx; intel-wired-
> lan@xxxxxxxxxxxxxxxx; netdev@xxxxxxxxxxxxxxx; linux-kernel@xxxxxxxxxxxxxxx;
> linux-pci@xxxxxxxxxxxxxxx
> Subject: [PATCH v5 12/14] fm10k: Report PCIe link properties with
> pcie_print_link_status()
>
> From: Bjorn Helgaas <bhelgaas@xxxxxxxxxx>
>
> Use pcie_print_link_status() to report PCIe link speed and possible
> limitations instead of implementing this in the driver itself.
>
> Note that pcie_get_minimum_link() can return misleading information because
> it finds the slowest link and the narrowest link without considering the
> total bandwidth of the link. If the path contains a 16 GT/s x1 link and a
> 2.5 GT/s x16 link, pcie_get_minimum_link() returns 2.5 GT/s x1, which
> corresponds to 250 MB/s of bandwidth, not the actual available bandwidth of
> about 2000 MB/s for a 16 GT/s x1 link.

This paragraph describes the behavior being removed, so it would be easier to parse if it were written to more clearly indicate that we're removing (and not adding) this pcie_get_minimum_link() behavior.

Aside from the commit message (which I don't feel strongly enough about to warrant a re-send of the patch), this looks good to me.

Acked-by: Jacob Keller <jacob.e.keller@xxxxxxxxx>

Thanks Bjorn and Tal for fixing this!

>
> Signed-off-by: Bjorn Helgaas <bhelgaas@xxxxxxxxxx>
> ---
> drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 87 --------------------------
> 1 file changed, 1 insertion(+), 86 deletions(-)
>
> diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
> b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
> index a434fecfdfeb..aa05fb534942 100644
> --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
> +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
> @@ -2120,91 +2120,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
> return 0;
> }
>
> -static void fm10k_slot_warn(struct fm10k_intfc *interface)
> -{
> - enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
> - enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
> - struct fm10k_hw *hw = &interface->hw;
> - int max_gts = 0, expected_gts = 0;
> -
> - if (pcie_get_minimum_link(interface->pdev, &speed, &width) ||
> - speed == PCI_SPEED_UNKNOWN || width ==
> PCIE_LNK_WIDTH_UNKNOWN) {
> - dev_warn(&interface->pdev->dev,
> - "Unable to determine PCI Express bandwidth.\n");
> - return;
> - }
> -
> - switch (speed) {
> - case PCIE_SPEED_2_5GT:
> - /* 8b/10b encoding reduces max throughput by 20% */
> - max_gts = 2 * width;
> - break;
> - case PCIE_SPEED_5_0GT:
> - /* 8b/10b encoding reduces max throughput by 20% */
> - max_gts = 4 * width;
> - break;
> - case PCIE_SPEED_8_0GT:
> - /* 128b/130b encoding has less than 2% impact on throughput */
> - max_gts = 8 * width;
> - break;
> - default:
> - dev_warn(&interface->pdev->dev,
> - "Unable to determine PCI Express bandwidth.\n");
> - return;
> - }
> -
> - dev_info(&interface->pdev->dev,
> - "PCI Express bandwidth of %dGT/s available\n",
> - max_gts);
> - dev_info(&interface->pdev->dev,
> - "(Speed:%s, Width: x%d, Encoding Loss:%s, Payload:%s)\n",
> - (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
> - speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
> - speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
> - "Unknown"),
> - hw->bus.width,
> - (speed == PCIE_SPEED_2_5GT ? "20%" :
> - speed == PCIE_SPEED_5_0GT ? "20%" :
> - speed == PCIE_SPEED_8_0GT ? "<2%" :
> - "Unknown"),
> - (hw->bus.payload == fm10k_bus_payload_128 ? "128B" :
> - hw->bus.payload == fm10k_bus_payload_256 ? "256B" :
> - hw->bus.payload == fm10k_bus_payload_512 ? "512B" :
> - "Unknown"));
> -
> - switch (hw->bus_caps.speed) {
> - case fm10k_bus_speed_2500:
> - /* 8b/10b encoding reduces max throughput by 20% */
> - expected_gts = 2 * hw->bus_caps.width;
> - break;
> - case fm10k_bus_speed_5000:
> - /* 8b/10b encoding reduces max throughput by 20% */
> - expected_gts = 4 * hw->bus_caps.width;
> - break;
> - case fm10k_bus_speed_8000:
> - /* 128b/130b encoding has less than 2% impact on throughput */
> - expected_gts = 8 * hw->bus_caps.width;
> - break;
> - default:
> - dev_warn(&interface->pdev->dev,
> - "Unable to determine expected PCI Express
> bandwidth.\n");
> - return;
> - }
> -
> - if (max_gts >= expected_gts)
> - return;
> -
> - dev_warn(&interface->pdev->dev,
> - "This device requires %dGT/s of bandwidth for optimal
> performance.\n",
> - expected_gts);
> - dev_warn(&interface->pdev->dev,
> - "A %sslot with x%d lanes is suggested.\n",
> - (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
> - hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
> - hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " :
> ""),
> - hw->bus_caps.width);
> -}
> -
> /**
> * fm10k_probe - Device Initialization Routine
> * @pdev: PCI device information struct
> @@ -2326,7 +2241,7 @@ static int fm10k_probe(struct pci_dev *pdev, const
> struct pci_device_id *ent)
> mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
>
> /* print warning for non-optimal configurations */
> - fm10k_slot_warn(interface);
> + pcie_print_link_status(interface->pdev);
>
> /* report MAC address for logging */
> dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);