Re: [PATCH v3 05/18] dmaengine: idxd: add IMS support in base driver

From: Dave Jiang
Date: Thu Oct 01 2020 - 16:48:17 EST

On 9/30/2020 11:47 AM, Thomas Gleixner wrote:
> On Tue, Sep 15 2020 at 16:28, Dave Jiang wrote:
>> struct idxd_device {
>> @@ -170,6 +171,7 @@ struct idxd_device {
>>  int num_groups;
>> + u32 ims_offset;
>>  u32 msix_perm_offset;
>>  u32 wqcfg_offset;
>>  u32 grpcfg_offset;
>> @@ -177,6 +179,7 @@ struct idxd_device {
>>  u64 max_xfer_bytes;
>>  u32 max_batch_size;
>> + int ims_size;
>>  int max_groups;
>>  int max_engines;
>>  int max_tokens;
>> @@ -196,6 +199,7 @@ struct idxd_device {
>>  struct work_struct work;
>>  int *int_handles;
>> + struct sbitmap ims_sbmap;

> This bitmap is needed for what?

Nothing anymore. I forgot to remove it. All of this is handled by the MSI core now, with code from you.
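
For reference, a loose sketch of the division of labor once the MSI core owns the vector accounting. This uses the long-standing platform-MSI API purely for illustration; the dev-msi/IMS series has its own entry points, and idxd_ims_write_msg() is a hypothetical callback name, not code from the series:

#include <linux/msi.h>

/*
 * Called by the MSI core for each allocated vector. The driver only
 * programs the device's IMS slot from the composed message; no
 * driver-side bitmap is needed because the core tracks allocations.
 */
static void idxd_ims_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	/* write msg->address_lo, msg->address_hi and msg->data
	   into the IMS slot for this vector */
}

	/* in probe: ask the core for nvec device-managed vectors */
	rc = platform_msi_domain_alloc_irqs(&pdev->dev, nvec,
					    idxd_ims_write_msg);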


>> --- a/drivers/dma/idxd/init.c
>> +++ b/drivers/dma/idxd/init.c
>> @@ -231,10 +231,51 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
>>  idxd->msix_perm_offset = offsets.msix_perm * 0x100;
>>  dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
>>  idxd->msix_perm_offset);
>> + idxd->ims_offset = offsets.ims * 0x100;

> Magic constant pulled out of thin air. #define ....

Will fix
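
For example (IDXD_TABLE_OFFSET_MULT is an illustrative name, not necessarily what the reworked patch will use):

/* Offsets in the table offset register are in units of 0x100 bytes */
#define IDXD_TABLE_OFFSET_MULT	0x100

	idxd->ims_offset = offsets.ims * IDXD_TABLE_OFFSET_MULT;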


>> + dev_dbg(dev, "IDXD IMS Offset: %#x\n", idxd->ims_offset);
>>  idxd->perfmon_offset = offsets.perfmon * 0x100;
>>  dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
>>  }
>> +#define PCI_DEVSEC_CAP 0x23
>> +#define SIOVDVSEC1(offset) ((offset) + 0x4)
>> +#define SIOVDVSEC2(offset) ((offset) + 0x8)
>> +#define DVSECID 0x5
>> +#define SIOVCAP(offset) ((offset) + 0x14)
>> +
>> +static void idxd_check_siov(struct idxd_device *idxd)
>> +{
>> + struct pci_dev *pdev = idxd->pdev;
>> + struct device *dev = &pdev->dev;
>> + int dvsec;
>> + u16 val16;
>> + u32 val32;
>> +
>> + dvsec = pci_find_ext_capability(pdev, PCI_DEVSEC_CAP);
>> + pci_read_config_word(pdev, SIOVDVSEC1(dvsec), &val16);
>> + if (val16 != PCI_VENDOR_ID_INTEL) {
>> + dev_dbg(&pdev->dev, "DVSEC vendor id is not Intel\n");
>> + return;
>> + }
>> +
>> + pci_read_config_word(pdev, SIOVDVSEC2(dvsec), &val16);
>> + if (val16 != DVSECID) {
>> + dev_dbg(&pdev->dev, "DVSEC ID is not SIOV\n");
>> + return;
>> + }
>> +
>> + pci_read_config_dword(pdev, SIOVCAP(dvsec), &val32);
>> + if ((val32 & 0x1) && idxd->hw.gen_cap.max_ims_mult) {
>> + idxd->ims_size = idxd->hw.gen_cap.max_ims_mult * 256ULL;
>> + dev_dbg(dev, "IMS size: %u\n", idxd->ims_size);
>> + set_bit(IDXD_FLAG_SIOV_SUPPORTED, &idxd->flags);
>> + dev_dbg(&pdev->dev, "IMS supported for device\n");
>> + return;
>> + }
>> +
>> + dev_dbg(&pdev->dev, "SIOV unsupported for device\n");

> It's really hard to find the code inside all of this dev_dbg()
> noise. But why is this capability check done in this driver? Is this
> capability stuff really IDXD specific or is the next device which
> supports this going to copy and pasta the above?

Will look into moving this into a common detection function for all similar devices. This should be common to all Intel devices that support SIOV.
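
A minimal sketch of what such a common helper could look like, built on the generic DVSEC defines that already exist in pci_regs.h (PCI_EXT_CAP_ID_DVSEC, PCI_DVSEC_HEADER1/2). pci_siov_supported() and SIOV_DVSEC_ID are illustrative names, not an existing kernel API:

#include <linux/pci.h>

/* Intel's DVSEC ID for Scalable IOV, per the idxd patch above */
#define SIOV_DVSEC_ID	0x5

static bool pci_siov_supported(struct pci_dev *pdev)
{
	u16 vendor, id, pos = 0;

	/* A device may carry several DVSECs; walk all of them */
	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_DVSEC))) {
		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == PCI_VENDOR_ID_INTEL && id == SIOV_DVSEC_ID)
			return true;
	}
	return false;
}

This keeps the config-space walk in one place; each driver would then only read the SIOV capability register behind the DVSEC for its device-specific bits (the IMS size, in idxd's case).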


>>  static void idxd_read_caps(struct idxd_device *idxd)
>>  {
>>  struct device *dev = &idxd->pdev->dev;
>> @@ -253,6 +294,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
>>  dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
>>  idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
>>  dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
>> + idxd_check_siov(idxd);
>>  if (idxd->hw.gen_cap.config_en)
>>  set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
>> @@ -347,9 +389,19 @@ static int idxd_probe(struct idxd_device *idxd)
>>  idxd->major = idxd_cdev_get_major(idxd);
>> + if (idxd->ims_size) {
>> + rc = sbitmap_init_node(&idxd->ims_sbmap, idxd->ims_size, -1,
>> + GFP_KERNEL, dev_to_node(dev));
>> + if (rc < 0)
>> + goto sbitmap_fail;
>> + }

> Ah, here the bitmap is allocated, but it's still completely unclear what
> it is used for.

Need to remove.


> The subject line is misleading as hell. This does not add support, it's
> doing some magic capability checks and allocates stuff which nobody
> knows what it is used for.

With the unneeded code removed and the SIOV detection code moved to a common implementation, it should be clearer.


> Thanks,
>
> tglx