[PATCH v5 8/8] nvmet: report NPDGL and NPDAL
From: Caleb Sander Mateos
Date: Fri Feb 27 2026 - 15:24:57 EST
A block device with a very large discard_granularity queue limit may not
be able to report it in the 16-bit NPDG and NPDA fields of the Identify
Namespace data structure. For this reason, version 2.1 of the NVMe
specification added the 32-bit fields NPDGL and NPDAL to the NVM Command
Set Specific Identify Namespace data structure. So report the
discard_granularity there too, and set OPTPERF to 11b to indicate that
those fields are supported.
Signed-off-by: Caleb Sander Mateos <csander@xxxxxxxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
---
drivers/nvme/target/admin-cmd.c | 2 ++
drivers/nvme/target/io-cmd-bdev.c | 19 +++++++++++++++----
drivers/nvme/target/nvmet.h | 2 ++
3 files changed, 19 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 3da31bb1183e..72e733b62a2c 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1056,10 +1056,12 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id) {
status = NVME_SC_INTERNAL;
goto out;
}
+ if (req->ns->bdev)
+ nvmet_bdev_set_nvm_limits(req->ns->bdev, id);
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
kfree(id);
out:
nvmet_req_complete(req, status);
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index d94f885a56d9..485b5cd42e4f 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -28,15 +28,15 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->nawun = lpp0b;
id->nawupf = lpp0b;
id->nacwu = lpp0b;
/*
- * OPTPERF = 01b indicates that the fields NPWG, NPWA, NPDG, NPDA, and
- * NOWS are defined for this namespace and should be used by
- * the host for I/O optimization.
+ * OPTPERF = 11b indicates that the fields NPWG, NPWA, NPDG, NPDA,
+ * NPDGL, NPDAL, and NOWS are defined for this namespace and should be
+ * used by the host for I/O optimization.
*/
- id->nsfeat |= 0x1 << NVME_NS_FEAT_OPTPERF_SHIFT;
+ id->nsfeat |= 0x3 << NVME_NS_FEAT_OPTPERF_SHIFT;
/* NPWG = Namespace Preferred Write Granularity. 0's based */
id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev));
/* NPWA = Namespace Preferred Write Alignment. 0's based */
id->npwa = id->npwg;
/* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
@@ -50,10 +50,21 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
/* Set WZDS and DRB if device supports unmapped write zeroes */
if (bdev_write_zeroes_unmap_sectors(bdev))
id->dlfeat = (1 << 3) | 0x1;
}
+void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
+ struct nvme_id_ns_nvm *id)
+{
+ /*
+ * NPDGL = Namespace Preferred Deallocate Granularity Large
+ * NPDAL = Namespace Preferred Deallocate Alignment Large
+ */
+ id->npdgl = id->npdal = cpu_to_le32(bdev_discard_granularity(bdev) /
+ bdev_logical_block_size(bdev));
+}
+
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
{
if (ns->bdev_file) {
fput(ns->bdev_file);
ns->bdev = NULL;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index b664b584fdc8..3a7efd9cb81a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -547,10 +547,12 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u32 nvmet_connect_cmd_data_len(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
+void nvmet_bdev_set_nvm_limits(struct block_device *bdev,
+ struct nvme_id_ns_nvm *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u32 nvmet_admin_cmd_data_len(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
--
2.45.2