get_vm_area needs spin lock

David Grothe (dave@gcom.com)
Wed, 30 Dec 1998 10:27:57 -0600


Kernel hackers:

As a result of some correspondence concerning get_vm_area, I have
been taking a close look at that routine. It seems to me that in an
SMP environment it needs spin lock protection around the loop that
runs down the linked list of vm areas. If two or more CPUs execute
the routine simultaneously, each can settle on the same address
range and splice its own entry into the list at the same point, so
one insert can be lost and the resulting areas can overlap.
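
To make the failure mode concrete, here is a minimal user-space
sketch of the same walk-and-splice pattern, with pthreads standing
in for two CPUs. This is illustration only, not kernel code, and
all the names in it are made up. Both threads can read the same
*p before either writes through it, so one splice can overwrite
the other:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        unsigned long addr;
        struct node *next;
};

static struct node *list = NULL;

/* Same shape as the loop in get_vm_area: walk to the insertion
 * point, then splice in the new node -- with no lock held. */
static void insert_sorted(unsigned long addr)
{
        struct node **p, *tmp, *n = malloc(sizeof(*n));

        if (!n)
                return;
        n->addr = addr;
        for (p = &list; (tmp = *p); p = &tmp->next)
                if (addr < tmp->addr)
                        break;
        n->next = *p;   /* the other thread can change *p here ...  */
        *p = n;         /* ... and then only one of the splices wins */
}

static void *worker(void *arg)
{
        unsigned long base = (unsigned long) arg;
        unsigned long i;

        for (i = 0; i < 10000; i++)
                insert_sorted(base + 2 * i);  /* 0,2,4,... vs 1,3,5,... */
        return NULL;
}

int main(void)
{
        pthread_t a, b;
        unsigned long count = 0;
        struct node *tmp;

        pthread_create(&a, NULL, worker, (void *) 0UL);
        pthread_create(&b, NULL, worker, (void *) 1UL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        for (tmp = list; tmp; tmp = tmp->next)
                count++;
        printf("%lu nodes, expected 20000\n", count);
        return 0;
}

Run it a few times on an SMP box and the final count usually comes
up short of 20000; every missing node is an insert that the other
thread's splice wiped out.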

I am enclosing a patch against 2.2.0pre1 (mm/vmalloc.c) that I
think fixes this problem. It is sent as a MIME attachment rather
than pasted into the message body, to avoid line-wrapping damage
from cutting and pasting.

I put spin lock protection around every traversal of the linked
list. Someone who knows the code better could probably find
shorter spans of code to protect.
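
The fix in miniature, building on the sketch above: take one lock
around the whole walk-and-splice. That is essentially what the
patch does, except that it uses spin_lock_irqsave and
spin_unlock_irqrestore on a spinlock_t instead of a pthread mutex:

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Only one walker/splicer at a time may touch the list. */
static void insert_sorted_locked(unsigned long addr)
{
        pthread_mutex_lock(&list_lock);
        insert_sorted(addr);
        pthread_mutex_unlock(&list_lock);
}

With the workers calling insert_sorted_locked instead, the count
comes out at exactly 20000 every time.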

This problem would not be present in a 2.0 kernel, since the SMP
approach there is to protect the entire kernel with a single big
lock.

-- Dave
[Attachment: vmalloc.c.patch]

--- vmalloc.c.old	Fri Nov 20 13:43:19 1998
+++ vmalloc.c	Wed Dec 30 09:50:14 1998
@@ -10,6 +10,7 @@
 #include <asm/uaccess.h>

 static struct vm_struct * vmlist = NULL;
+static spinlock_t vmlist_spinlock = SPIN_LOCK_UNLOCKED;

 static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
 {
@@ -152,28 +153,34 @@
 struct vm_struct * get_vm_area(unsigned long size)
 {
         unsigned long addr;
+        unsigned long save_flags;
         struct vm_struct **p, *tmp, *area;

-        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
+        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_ATOMIC);
         if (!area)
                 return NULL;
         addr = VMALLOC_START;
+        spin_lock_irqsave(&vmlist_spinlock, save_flags);
         for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                 if (size + addr < (unsigned long) tmp->addr)
                         break;
-                if (addr > VMALLOC_END-size)
+                if (addr > VMALLOC_END-size) {
+                        spin_unlock_irqrestore(&vmlist_spinlock, save_flags);
                         return NULL;
+                }
                 addr = tmp->size + (unsigned long) tmp->addr;
         }
         area->addr = (void *)addr;
         area->size = size + PAGE_SIZE;
         area->next = *p;
         *p = area;
+        spin_unlock_irqrestore(&vmlist_spinlock, save_flags);
         return area;
 }

 void vfree(void * addr)
 {
+        unsigned long save_flags;
         struct vm_struct **p, *tmp;

         if (!addr)
@@ -182,14 +189,17 @@
                 printk("Trying to vfree() bad address (%p)\n", addr);
                 return;
         }
+        spin_lock_irqsave(&vmlist_spinlock, save_flags);
         for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                 if (tmp->addr == addr) {
                         *p = tmp->next;
+                        spin_unlock_irqrestore(&vmlist_spinlock, save_flags);
                         vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                         kfree(tmp);
                         return;
                 }
         }
+        spin_unlock_irqrestore(&vmlist_spinlock, save_flags);
         printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
 }

@@ -217,11 +227,13 @@
         struct vm_struct *tmp;
         char *vaddr, *buf_start = buf;
         unsigned long n;
+        unsigned long save_flags;

         /* Don't allow overflow */
         if ((unsigned long) addr + count < count)
                 count = -(unsigned long) addr;

+        spin_lock_irqsave(&vmlist_spinlock, save_flags);
         for (tmp = vmlist; tmp; tmp = tmp->next) {
                 vaddr = (char *) tmp->addr;
                 if (addr >= vaddr + tmp->size - PAGE_SIZE)
@@ -245,5 +257,6 @@
                 } while (--n > 0);
         }
 finished:
+        spin_unlock_irqrestore(&vmlist_spinlock, save_flags);
         return buf - buf_start;
 }

