[PATCH v3 4/9] driver core: Replace dev->dma_skip_sync with DEV_FLAG_DMA_SKIP_SYNC
From: Douglas Anderson
Date: Thu Apr 02 2026 - 20:54:21 EST
In C, adjacent bitfields may share a single storage unit, so writing one
bitfield is a non-atomic read-modify-write that can clobber its neighbors
when done from multiple threads without locking. Switch "dma_skip_sync"
over to the "flags" field (modified with atomic bitops) so concurrent
modifications are safe.
Cc: Alexander Lobakin <aleksander.lobakin@xxxxxxxxx>
Cc: Eric Dumazet <edumazet@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Douglas Anderson <dianders@xxxxxxxxxxxx>
---
Not fixing any known bugs; problem is theoretical and found by code
inspection. Change is done somewhat manually and only lightly tested
(mostly compile-time tested).
NOTE: previously the bit was only consumed when CONFIG_DMA_NEED_SYNC was
enabled, but this change reserves the bit unconditionally. While we could
recover the "dynamic" behavior by making the flag numbering conditional in
the enum definition, it doesn't seem worth it at this point.
Changes in v3:
- New
include/linux/device.h | 7 +++----
include/linux/dma-map-ops.h | 4 ++--
include/linux/dma-mapping.h | 2 +-
kernel/dma/mapping.c | 8 ++++----
mm/hmm.c | 2 +-
5 files changed, 11 insertions(+), 12 deletions(-)
diff --git a/include/linux/device.h b/include/linux/device.h
index e74c7d0813ce..e900748d3038 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -470,11 +470,14 @@ struct device_physical_location {
* until other devices probe successfully.
* @DEV_FLAG_DMA_IOMMU: Device is using default IOMMU implementation for DMA and
* doesn't rely on dma_ops structure.
+ * @DEV_FLAG_DMA_SKIP_SYNC: DMA sync operations can be skipped for coherent
+ * buffers.
*/
enum struct_device_flags {
DEV_FLAG_READY_TO_PROBE,
DEV_FLAG_CAN_MATCH,
DEV_FLAG_DMA_IOMMU,
+ DEV_FLAG_DMA_SKIP_SYNC,
};
/**
@@ -566,7 +569,6 @@ enum struct_device_flags {
* and optionall (if the coherent mask is large enough) also
* for dma allocations. This flag is managed by the dma ops
* instance from ->dma_supported.
- * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
* @flags: DEV_FLAG_XXX flags. Use atomic bitfield operations to modify.
*
* At the lowest level, every device in a Linux system is represented by an
@@ -683,9 +685,6 @@ struct device {
#ifdef CONFIG_DMA_OPS_BYPASS
bool dma_ops_bypass : 1;
#endif
-#ifdef CONFIG_DMA_NEED_SYNC
- bool dma_skip_sync:1;
-#endif
unsigned long flags;
};
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 60b63756df82..4d9d1fe3277c 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -245,8 +245,8 @@ static inline void dma_reset_need_sync(struct device *dev)
{
#ifdef CONFIG_DMA_NEED_SYNC
/* Reset it only once so that the function can be called on hotpath */
- if (unlikely(dev->dma_skip_sync))
- dev->dma_skip_sync = false;
+ if (unlikely(test_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags)))
+ clear_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags);
#endif
}
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 99ef042ecdb4..23273b0fe84e 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -419,7 +419,7 @@ bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);
static inline bool dma_dev_need_sync(const struct device *dev)
{
/* Always call DMA sync operations when debugging is enabled */
- return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
+ return !test_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags) || IS_ENABLED(CONFIG_DMA_API_DEBUG);
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 6d3dd0bd3a88..f50b648ed460 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -467,7 +467,7 @@ bool dma_need_unmap(struct device *dev)
{
if (!dma_map_direct(dev, get_dma_ops(dev)))
return true;
- if (!dev->dma_skip_sync)
+ if (!test_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags))
return true;
return IS_ENABLED(CONFIG_DMA_API_DEBUG);
}
@@ -483,16 +483,16 @@ static void dma_setup_need_sync(struct device *dev)
* mapping, if any. During the device initialization, it's
* enough to check only for the DMA coherence.
*/
- dev->dma_skip_sync = dev_is_dma_coherent(dev);
+ assign_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags, dev_is_dma_coherent(dev));
else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
!ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
/*
* Synchronization is not possible when none of DMA sync ops
* is set.
*/
- dev->dma_skip_sync = true;
+ set_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags);
else
- dev->dma_skip_sync = false;
+ clear_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags);
}
#else /* !CONFIG_DMA_NEED_SYNC */
static inline void dma_setup_need_sync(struct device *dev) { }
diff --git a/mm/hmm.c b/mm/hmm.c
index 5955f2f0c83d..137450e096bc 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -709,7 +709,7 @@ int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
* best approximation to ensure no swiotlb buffering happens.
*/
#ifdef CONFIG_DMA_NEED_SYNC
- dma_need_sync = !dev->dma_skip_sync;
+ dma_need_sync = !test_bit(DEV_FLAG_DMA_SKIP_SYNC, &dev->flags);
#endif /* CONFIG_DMA_NEED_SYNC */
if (dma_need_sync || dma_addressing_limited(dev))
return -EOPNOTSUPP;
--
2.53.0.1213.gd9a14994de-goog