/*
 *   Copyright (C) 2000 Regents of the University of California
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *   Mutex functions.
 *       MUTEX_SPIN        spinlock
 *       MUTEX_SPIN_INTR   spinlock with interrupts disabled
 *       MUTEX_BLOCK       semaphore
 *
 *   Define -DDEBUG_MUTEX to get debugging spinlocks on alpha
 *   (will report file/line no. of stuck spinlocks).
 *
 *   Jim Garlick
 */

#if !defined(_LINUX_MUTEX_H)
#define _LINUX_MUTEX_H

#if defined(__KERNEL__)

#if defined(DEBUG_MUTEX) && defined(__alpha__) && !defined(DEBUG_SPINLOCK)
#define DEBUG_SPINLOCK
#endif

/*
 * NOTE: the original include arguments were lost; the headers below are
 * assumed, covering the symbols used in this file (version checks,
 * spinlocks, semaphores, current, in_interrupt, smp_processor_id).
 */
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/semaphore.h>

#if !defined(DEBUG_SPINLOCK)
#define debug_spin_lock(lock, file, line)       spin_lock(lock)
#define debug_spin_trylock(lock, file, line)    spin_trylock(lock)
#endif

#define mutex_trylock(l)        debug_mutex_trylock(l, __BASE_FILE__, __LINE__)
#define mutex_lock(l)           debug_mutex_lock(l, __BASE_FILE__, __LINE__)

#define PID_NONE        (PID_MAX + 1)
#define PID_INTR        (PID_MAX + 2 + smp_processor_id())
#define MY_PID          (in_interrupt() ? PID_INTR : current->pid)
#define MY_CPU          smp_processor_id()

#define MUTEX_MAX_NAME  16

typedef enum { MUTEX_SPIN, MUTEX_SPIN_INTR, MUTEX_BLOCK } lock_type_t;

typedef struct {
        lock_type_t type;
        union {
                struct {
                        spinlock_t lock;
                        unsigned long flags;
                } spin;
                struct {
                        struct semaphore lock;
                } blocking;
        } mutex_u;
        pid_t holder;
#if defined(DEBUG_MUTEX)
        char name[MUTEX_MAX_NAME];
#endif
} mutex_t;

/* binary semaphores */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
#define BS_INIT(s)      (s) = MUTEX
#else
#define BS_INIT(s)      init_MUTEX(&(s))
#endif
#define BS_TRY(s)       (down_trylock(&(s)) == 0)
#define BS_LOCK(s)      down(&(s))
#define BS_UNLOCK(s)    up(&(s))

extern __inline__ void
mutex_init(mutex_t *l, char *name, lock_type_t type)
{
        l->type = type;
        switch (l->type) {
        case MUTEX_BLOCK:
                BS_INIT(l->mutex_u.blocking.lock);
                break;
        case MUTEX_SPIN:
        case MUTEX_SPIN_INTR:
                l->mutex_u.spin.lock = SPIN_LOCK_UNLOCKED;
                break;
        }
        l->holder = PID_NONE;
#if defined(DEBUG_MUTEX)
        strncpy(l->name, name, MUTEX_MAX_NAME);
#endif
}

extern __inline__ void
mutex_destroy(mutex_t *l)
{
        ASSERT(l->holder == PID_NONE);
}

/*
 * Return nonzero if the lock is held by this thread.
 */
extern __inline__ int
mutex_held(mutex_t *l)
{
        return (l->holder == MY_PID);
}

/*
 * Really we want to be using spin_lock_irqsave/spin_unlock_irqrestore;
 * however, there is no trylock version of that interface.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
#if defined(__alpha__) || defined(__sparc__)
#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#elif defined(__i386__)
#define local_irq_save(x) \
        __asm__ __volatile__("pushfl ; popl %0 ; cli" : "=g" (x) : /* no input */ : "memory")
#define local_irq_restore(x) \
        __asm__ __volatile__("pushl %0 ; popfl" : /* no output */ : "g" (x) : "memory")
#else
#error UNKNOWN ARCH.
#endif
#endif

extern __inline__ void
debug_mutex_lock(mutex_t *l, char *file, int line)
{
        unsigned long flags;

        ASSERT(!mutex_held(l));
        switch (l->type) {
        case MUTEX_BLOCK:
                ASSERT(!in_interrupt());
                BS_LOCK(l->mutex_u.blocking.lock);
                break;
        case MUTEX_SPIN_INTR:
                local_irq_save(flags);
                debug_spin_lock(&l->mutex_u.spin.lock, file, line);
                l->mutex_u.spin.flags = flags;
                break;
        case MUTEX_SPIN:
                debug_spin_lock(&l->mutex_u.spin.lock, file, line);
                break;
        }
        l->holder = MY_PID;
}

extern __inline__ void
mutex_unlock(mutex_t *l)
{
        unsigned long flags;

        ASSERT(mutex_held(l));
        l->holder = PID_NONE;
        switch (l->type) {
        case MUTEX_BLOCK:
                BS_UNLOCK(l->mutex_u.blocking.lock);
                break;
        case MUTEX_SPIN_INTR:
                flags = l->mutex_u.spin.flags;
                spin_unlock(&l->mutex_u.spin.lock);
                local_irq_restore(flags);
                break;
        case MUTEX_SPIN:
                spin_unlock(&l->mutex_u.spin.lock);
                break;
        }
}

/*
 * Unordered lock release - if these are spinlocks, we need to make sure
 * that when l1 is finally released, we __restore_flags() the value that
 * was saved when l was acquired.
 *
 * Usage:
 *      mutex_lock(l)
 *      mutex_lock(l1)
 *      mutex_unlock_unordered(l, l1)
 *      mutex_unlock(l1)
 */
extern __inline__ void
mutex_unlock_unordered(mutex_t *l, mutex_t *l1)
{
        unsigned long flags;

        ASSERT(mutex_held(l));
        ASSERT(mutex_held(l1));
        l->holder = PID_NONE;
        switch (l->type) {
        case MUTEX_BLOCK:
                BS_UNLOCK(l->mutex_u.blocking.lock);
                break;
        case MUTEX_SPIN_INTR:
                ASSERT(l1->type == MUTEX_SPIN_INTR);
                flags = l->mutex_u.spin.flags;
                spin_unlock(&l->mutex_u.spin.lock);
                local_irq_restore(l1->mutex_u.spin.flags);
                l1->mutex_u.spin.flags = flags;
                break;
        case MUTEX_SPIN:
                spin_unlock(&l->mutex_u.spin.lock);
                break;
        }
}

/*
 * Returns nonzero if the lock has been acquired.
 */
extern __inline__ int
debug_mutex_trylock(mutex_t *l, char *file, int line)
{
        int res;
        unsigned long flags;

        ASSERT(!mutex_held(l));
        switch (l->type) {
        case MUTEX_BLOCK:
                ASSERT(!in_interrupt());
                res = BS_TRY(l->mutex_u.blocking.lock);
                break;
        case MUTEX_SPIN_INTR:
                local_irq_save(flags);
                res = (debug_spin_trylock(&l->mutex_u.spin.lock, file, line) != 0);
                if (res)
                        l->mutex_u.spin.flags = flags;
                else
                        local_irq_restore(flags);
                break;
        case MUTEX_SPIN:
                res = (debug_spin_trylock(&l->mutex_u.spin.lock, file, line) != 0);
                break;
        }
        if (res)
                l->holder = MY_PID;
        return res;
}

#endif /* __KERNEL__ */
#endif /* _LINUX_MUTEX_H */
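
/*
 * Example usage - a minimal sketch of the API above: a MUTEX_SPIN_INTR
 * mutex protecting a counter that is touched from both process and
 * interrupt context.  The names foo_lock, foo_count and the foo_*
 * functions are hypothetical, not part of this header.
 *
 *      static mutex_t foo_lock;
 *      static int foo_count;
 *
 *      void foo_setup(void)
 *      {
 *              mutex_init(&foo_lock, "foo_lock", MUTEX_SPIN_INTR);
 *      }
 *
 *      void foo_increment(void)
 *      {
 *              mutex_lock(&foo_lock);          // saves irq flags, disables interrupts, spins
 *              foo_count++;
 *              mutex_unlock(&foo_lock);        // restores the saved irq flags
 *      }
 *
 *      int foo_try_increment(void)
 *      {
 *              if (!mutex_trylock(&foo_lock))  // nonzero means acquired
 *                      return 0;
 *              foo_count++;
 *              mutex_unlock(&foo_lock);
 *              return 1;
 *      }
 */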