On Wed, Jan 24, 2024 at 11:38:41AM +0000, John Garry wrote:
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5045c84f2516..6a34a5d92088 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -911,6 +911,32 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+ /*
+ * Ensure that nothing has been sent which cannot be executed
+ * atomically.
+ */
+ if (req->cmd_flags & REQ_ATOMIC) {
+ struct nvme_ns_head *head = ns->head;
+ u32 boundary_bytes = head->atomic_boundary;
+
+ if (blk_rq_bytes(req) > ns->head->atomic_max)
+ return BLK_STS_IOERR;
+
+ if (boundary_bytes) {
+ u32 mask = boundary_bytes - 1, imask = ~mask;
+ u32 start = blk_rq_pos(req) << SECTOR_SHIFT;
+ u32 end = start + blk_rq_bytes(req);
+
+ if (blk_rq_bytes(req) > boundary_bytes)
+ return BLK_STS_IOERR;
+
+ if (((start & imask) != (end & imask)) &&
+ (end & mask)) {
+ return BLK_STS_IOERR;
+ }
+ }
+ }

Aren't these new fields, atomic_max and atomic_boundary, duplicates of
the equivalent queue limits?  Let's just use the queue limits instead.
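
To make that concrete, here is a rough sketch of what the driver side could
look like, assuming the limits from this series are stored in struct
queue_limits as atomic_write_max_bytes and atomic_write_boundary_bytes
(field names assumed here, not taken from the posted patches), with
nvme_valid_atomic_write() as a made-up helper name:

/*
 * Sketch only: read the atomic write limits from the request_queue
 * instead of duplicating them in nvme_ns_head.
 */
static bool nvme_valid_atomic_write(struct request *req)
{
	struct queue_limits *limits = &req->q->limits;
	u32 boundary_bytes = limits->atomic_write_boundary_bytes;

	if (blk_rq_bytes(req) > limits->atomic_write_max_bytes)
		return false;

	if (boundary_bytes) {
		u64 mask = boundary_bytes - 1, imask = ~mask;
		u64 start = blk_rq_pos(req) << SECTOR_SHIFT;
		u64 end = start + blk_rq_bytes(req);

		if (blk_rq_bytes(req) > boundary_bytes)
			return false;

		/* the write must not straddle an atomic boundary */
		if ((start & imask) != (end & imask) && (end & mask))
			return false;
	}
	return true;
}

nvme_setup_rw() would then only need a single
"if ((req->cmd_flags & REQ_ATOMIC) && !nvme_valid_atomic_write(req))
return BLK_STS_IOERR;" check, and the nvme_ns_head fields go away.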
And couldn't we validate that these constraints are not violated generically
in submit_bio_noacct() instead of in the low level driver?  The driver
assumes all other requests are already sanity checked, so I don't think we
should change the responsibility for that just for this flag.
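
Something along these lines in the block layer would keep that
responsibility where it is today; again the queue_limits field names are
assumed and blk_atomic_write_ok() is only a made-up helper for
illustration, doing the same boundary-crossing check as the patch above:

static bool blk_atomic_write_ok(struct bio *bio)
{
	struct queue_limits *limits = &bdev_get_queue(bio->bi_bdev)->limits;
	u32 boundary_bytes = limits->atomic_write_boundary_bytes;
	u64 start = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 len = bio->bi_iter.bi_size;

	if (len > limits->atomic_write_max_bytes)
		return false;

	if (boundary_bytes) {
		u64 imask = ~((u64)boundary_bytes - 1);

		if (len > boundary_bytes)
			return false;
		/* must not cross an atomic write boundary */
		if ((start & imask) != ((start + len - 1) & imask))
			return false;
	}
	return true;
}

submit_bio_noacct() could then fail non-conforming bios up front, e.g.:

	if ((bio->bi_opf & REQ_ATOMIC) && !blk_atomic_write_ok(bio))
		goto not_supported;

and every driver keeps getting requests that already fit the limits it
advertised.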