--- linux/kernel/sched.c~	Fri Jun 23 11:13:41 2000
+++ linux/kernel/sched.c	Fri Jun 23 11:18:48 2000
@@ -60,8 +60,8 @@ struct task_struct * init_tasks[NR_CPUS]
 /*
  * The run-queue lock locks the parts that actually access
  * and change the run-queues, and have to be interrupt-safe.
  */
-__cacheline_aligned spinlock_t runqueue_lock = SPIN_LOCK_UNLOCKED;  /* second */
-__cacheline_aligned rwlock_t tasklist_lock = RW_LOCK_UNLOCKED;  /* third */
+spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;  /* second */
+rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;  /* third */
 
 static LIST_HEAD(runqueue_head);
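
For reference, here is a minimal user-space sketch of the attribute placement the patch switches to: the alignment attribute follows the declarator rather than leading the declaration. It assumes that __cacheline_aligned boils down to GCC's __attribute__((__aligned__(...))) on SMP builds; the CACHE_BYTES value and the demo_* lock types below are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/*
 * Illustrative stand-ins only: CACHE_BYTES and the demo lock types are
 * placeholders for this sketch, not the kernel's real spinlock_t/rwlock_t.
 */
#define CACHE_BYTES 64
#define cacheline_aligned __attribute__((__aligned__(CACHE_BYTES)))

typedef struct { volatile unsigned int lock; } demo_spinlock_t;
typedef struct { volatile unsigned int lock; } demo_rwlock_t;

/* Old form: the attribute leads the whole declaration. */
/* cacheline_aligned demo_spinlock_t old_style_lock = { 0 }; */

/* New form, as in the patch: the attribute follows the declarator. */
demo_spinlock_t runqueue_lock_demo cacheline_aligned = { 0 };
demo_rwlock_t   tasklist_lock_demo cacheline_aligned = { 0 };

int main(void)
{
	/* Both addresses should come out as multiples of CACHE_BYTES. */
	printf("runqueue_lock_demo: %p\n", (void *)&runqueue_lock_demo);
	printf("tasklist_lock_demo: %p\n", (void *)&tasklist_lock_demo);
	return 0;
}

Either placement is accepted by GCC for a simple declaration like this; the sketch only demonstrates the form the patch standardizes on, with the attribute attached to the declarator itself.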