This patch makes the RCU list primitives use the SMP-only versions of the
memory barriers, so there is no need to hurt UP performance. It got dropped
from my list of things to push long ago.
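
For reference, the UP/SMP split these helpers rely on looks roughly like the
usual asm/system.h definitions (a sketch, not copied verbatim from any
particular architecture):

#ifdef CONFIG_SMP
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

so on UP kernels the RCU list code costs at most a compiler barrier instead
of a hardware write barrier.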
Thanks
Dipankar
diff -urN linux-2.5.65-base/include/linux/list.h linux-2.5.65-nowmb/include/linux/list.h
--- linux-2.5.65-base/include/linux/list.h 2003-03-18 03:14:07.000000000 +0530
+++ linux-2.5.65-nowmb/include/linux/list.h 2003-03-20 18:33:12.000000000 +0530
@@ -84,7 +84,7 @@
{
new->next = next;
new->prev = prev;
- wmb();
+ smp_wmb();
next->prev = new;
prev->next = new;
}
@@ -303,11 +303,11 @@
*/
#define list_for_each_rcu(pos, head) \
for (pos = (head)->next, prefetch(pos->next); pos != (head); \
- pos = pos->next, ({ read_barrier_depends(); 0;}), prefetch(pos->next))
+ pos = pos->next, ({ smp_read_barrier_depends(); 0;}), prefetch(pos->next))
#define __list_for_each_rcu(pos, head) \
for (pos = (head)->next; pos != (head); \
- pos = pos->next, ({ read_barrier_depends(); 0;}))
+ pos = pos->next, ({ smp_read_barrier_depends(); 0;}))
/**
* list_for_each_safe_rcu - iterate over an rcu-protected list safe
@@ -318,7 +318,7 @@
*/
#define list_for_each_safe_rcu(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, ({ read_barrier_depends(); 0;}), n = pos->next)
+ pos = n, ({ smp_read_barrier_depends(); 0;}), n = pos->next)
/*
* Double linked lists with a single pointer list head.
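
To illustrate how the pairing is used (a hypothetical example -- struct foo,
foo_lock and foo_list are made up and not part of the patch): the writer side
relies on the smp_wmb() in __list_add_rcu(), the reader side on the
smp_read_barrier_depends() in list_for_each_rcu().

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct foo {
	int data;
	struct list_head list;
};

static LIST_HEAD(foo_list);
static spinlock_t foo_lock = SPIN_LOCK_UNLOCKED;

void foo_add(struct foo *new)
{
	spin_lock(&foo_lock);
	/* smp_wmb() in __list_add_rcu() orders the init of new->list
	 * before the entry becomes visible through foo_list. */
	list_add_rcu(&new->list, &foo_list);
	spin_unlock(&foo_lock);
}

int foo_present(int key)
{
	struct list_head *pos;
	int found = 0;

	rcu_read_lock();
	/* smp_read_barrier_depends() after each pos = pos->next load
	 * ensures we also see the initialised entry contents. */
	list_for_each_rcu(pos, &foo_list) {
		struct foo *f = list_entry(pos, struct foo, list);
		if (f->data == key) {
			found = 1;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

With the patch applied, neither side pays for a hardware barrier on UP.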