[PATCH 2/8] block: implement blk kmap helpers
From: Tejun Heo
Date: Fri Jan 13 2006 - 10:24:16 EST
When block requests are handled via DMA, the DMA mapping functions
take care of cache coherency. Unfortunately, cache coherency was
left unhandled for block PIO until now, resulting in data corruption
on architectures with aliasing caches.
All block PIO operations use kmap/kunmap to access the target memory
area, and the mapping/unmapping points are the perfect places for
cache flushing: kmap/kunmap are to a PIO'ing CPU what
dma_map/dma_unmap are to a DMA'ing device.
This patch implements blk kmap helpers which additionally take a
@direction argument and deal with cache coherency.
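For illustration, a driver's PIO read path (device to memory, i.e.
DMA_FROM_DEVICE) would use the helpers roughly as in the sketch
below. pio_insw() is a made-up stand-in for however the driver
actually moves data off the device, and the KM slot and IRQ handling
are illustrative only, not part of this patch.

	unsigned long flags;
	void *buf;

	/* atomic kmaps with the KM_BIO_*_IRQ slots require irqs off */
	local_irq_save(flags);
	buf = blk_kmap_atomic(page, KM_BIO_DST_IRQ, DMA_FROM_DEVICE);
	pio_insw(dev, buf + offset, len);	/* CPU stores device data */
	blk_kunmap_atomic(buf, KM_BIO_DST_IRQ, DMA_FROM_DEVICE);
	local_irq_restore(flags);

Because @dir is DMA_FROM_DEVICE, blk_kunmap_atomic() flushes the
kernel alias of the page before unmapping it, so user mappings of
the page see the data the CPU just stored.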
Signed-off-by: Tejun Heo <htejun@xxxxxxxxx>
---
include/linux/blkdev.h | 40 ++++++++++++++++++++++++++++++++++++++++
1 files changed, 40 insertions(+), 0 deletions(-)
fdaeda6742b70451ddbb860b440d2533c6591fda
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 02a585f..1040029 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -17,6 +17,10 @@
#include <asm/scatterlist.h>
+/* for PIO kmap helpers */
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+
struct request_queue;
typedef struct request_queue request_queue_t;
struct elevator_queue;
@@ -812,6 +816,42 @@ static inline void put_dev_sector(Sector
page_cache_release(p.v);
}
+/*
+ * PIO kmap helpers.
+ *
+ * Block PIO requires cache flushes on architectures with aliasing
+ * caches. If a driver wants to perform PIO on a user-mappable page
+ * (page cache page), it MUST use one of the following kmap/unmap
+ * helpers unless it handles cache coherency itself.
+ */
+static inline void *blk_kmap_atomic(struct page *page, enum km_type type,
+ enum dma_data_direction dir)
+{
+ return kmap_atomic(page, type);
+}
+
+static inline void blk_kunmap_atomic(void *addr, enum km_type type,
+ enum dma_data_direction dir)
+{
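+	/* flush if the CPU stored into the page, i.e. a PIO read */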
+ if (dir == DMA_BIDIRECTIONAL || dir == DMA_FROM_DEVICE)
+ flush_dcache_page(kmap_atomic_to_page(addr));
+ kunmap_atomic(addr, type);
+}
+
+static inline void *blk_kmap(struct page *page, enum dma_data_direction dir)
+{
+ return kmap(page);
+}
+
+static inline void blk_kunmap(struct page *page, enum dma_data_direction dir)
+{
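+	/* as in blk_kunmap_atomic(): flush CPU stores for PIO reads */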
+ if (dir == DMA_BIDIRECTIONAL || dir == DMA_FROM_DEVICE)
+ flush_dcache_page(page);
+ kunmap(page);
+}
+
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
void kblockd_flush(void);
--
1.0.6