Hi Linus and All,
This patch breaks down the three global IPC locks into one lock per IPC
ID.
In the current implementation, operations on all IPC semaphores are serialized
by a single IPC semaphore lock. Changing the IPC locks from one lock per IPC
resource type to one lock per IPC ID makes sense to me: it reduces the possible
lock contention in applications where IPC resources are heavily used.
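To make the new scheme concrete, here is a rough sketch of the locking pattern
the patch introduces (simplified for illustration only; the real code is in
ipc_lock()/ipc_unlock() and the array-grow path in the patch below). A lookup
takes the array lock shared plus the entry's own spinlock, and only resizing
the entries[] array takes the array lock exclusive, so operations on different
IPC IDs no longer serialize on one global lock:

	/* per-ID entry: its own spinlock protects just this one IPC ID */
	struct ipc_id {
		struct kern_ipc_perm *p;
		spinlock_t lock;
	};

	/* looking up one ID: shared on the array, exclusive on the entry */
	read_lock(&ids->ary_lock);
	spin_lock(&ids->entries[lid].lock);
	/* ... use ids->entries[lid].p ... */
	spin_unlock(&ids->entries[lid].lock);
	read_unlock(&ids->ary_lock);

	/* growing the array: exclusive on the array lock while swapping entries[] */
	write_lock(&ids->ary_lock);
	/* ... install the larger entries[] array ... */
	write_unlock(&ids->ary_lock);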
Results from the LMbench pipe and IPC latency tests show that this patch
improves the performance of those operations by 1% to 9%.
The patch applies to the 2.5.30 kernel. Please consider it.
-- Mingming Cao
diff -urN -X ../dontdiff ../base/linux-2.5.30/ipc/util.c 2.5.30-ipc/ipc/util.c
--- ../base/linux-2.5.30/ipc/util.c	Thu Aug  1 14:16:21 2002
+++ 2.5.30-ipc/ipc/util.c	Fri Aug  2 16:06:19 2002
@@ -74,9 +74,11 @@
 		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
 		ids->size = 0;
 	}
-	ids->ary = SPIN_LOCK_UNLOCKED;
-	for(i=0;i<ids->size;i++)
+	ids->ary_lock = RW_LOCK_UNLOCKED;
+	for(i=0;i<ids->size;i++) {
 		ids->entries[i].p = NULL;
+		ids->entries[i].lock = SPIN_LOCK_UNLOCKED;
+	}
 }
 
 /**
@@ -119,14 +121,15 @@
 	memcpy(new, ids->entries, sizeof(struct ipc_id)*ids->size);
 	for(i=ids->size;i<newsize;i++) {
 		new[i].p = NULL;
+		new[i].lock = SPIN_LOCK_UNLOCKED;
 	}
-	spin_lock(&ids->ary);
+	write_lock(&ids->ary_lock);
 	old = ids->entries;
 	ids->entries = new;
 	i = ids->size;
 	ids->size = newsize;
-	spin_unlock(&ids->ary);
+	write_unlock(&ids->ary_lock);
 	ipc_free(old, sizeof(struct ipc_id)*i);
 	return ids->size;
 }
@@ -165,7 +168,8 @@
 	if(ids->seq > ids->seq_max)
 		ids->seq = 0;
 
-	spin_lock(&ids->ary);
+	read_lock(&ids->ary_lock);
+	spin_lock(&ids->entries[id].lock);
 	ids->entries[id].p = new;
 	return id;
 }
diff -urN -X ../dontdiff ../base/linux-2.5.30/ipc/util.h 2.5.30-ipc/ipc/util.h
--- ../base/linux-2.5.30/ipc/util.h	Thu Aug  1 14:16:28 2002
+++ 2.5.30-ipc/ipc/util.h	Fri Aug  2 16:06:19 2002
@@ -20,11 +20,13 @@
 	unsigned short seq_max;
 	struct semaphore sem;
 	spinlock_t ary;
+	rwlock_t ary_lock;
 	struct ipc_id* entries;
 };
 
 struct ipc_id {
 	struct kern_ipc_perm* p;
+	spinlock_t lock;
 };
 
@@ -72,16 +74,25 @@
 	if(lid >= ids->size)
 		return NULL;
 
-	spin_lock(&ids->ary);
+	/*spin_lock(&ids->ary);*/
+	read_lock(&ids->ary_lock);
+	spin_lock(&ids->entries[lid].lock);
 	out = ids->entries[lid].p;
-	if(out==NULL)
-		spin_unlock(&ids->ary);
+	if(out==NULL) {
+		spin_unlock(&ids->entries[lid].lock);
+		read_unlock(&ids->ary_lock);
+	}
 	return out;
 }
 
 extern inline void ipc_unlock(struct ipc_ids* ids, int id)
 {
-	spin_unlock(&ids->ary);
+	int lid = id % SEQ_MULTIPLIER;
+	if(lid >= ids->size)
+		return;
+
+	spin_unlock(&ids->entries[lid].lock);
+	read_unlock(&ids->ary_lock);
 }
 
 extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq)