linux-2.6.14-rc2-mm1/include/asm-x86_64/atomic.h

(size + 7) / 8? Or increment virt_addr by 0.5? :)
--- linux.vanilla-2.6.14-rc2-mm1/include/asm-x86_64/atomic.h 2005-09-22 15:22:11.000000000 +0100
+++ linux-2.6.14-rc2-mm1/include/asm-x86_64/atomic.h 2005-10-14 18:29:47.000000000 +0100
@@ -378,4 +378,16 @@
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
+/* ECC atomic, DMA, SMP and interrupt safe scrub function */
+
+static __inline__ void atomic_scrub(unsigned long *virt_addr, u32 size)
+{
+ u32 i;
+ for (i = 0; i < size / 4; i++, virt_addr++)
+ /* Very carefully read and write to memory atomically

Shouldn't that be addq?
+ * so we are interrupt, DMA and SMP safe.
+ */
+ __asm__ __volatile__("lock; addl $0, %0"::"m"(*virt_addr));
+}
+
#endif