We ran into the DMA memory allocation problem on 2GB machines. Here is
a patch implementing a two-pass memory allocation scheme. For DMA
allocations, it uses the first available page of DMA memory. For
non-DMA allocations, the first pass tries non-DMA memory only; if that
fails, a second pass takes the first available memory of any kind. I am
not sure how well this will behave on low-memory machines. Is there a
better approach to this problem?
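To make the two-pass policy concrete, here is a small standalone
sketch of the idea (a toy model with made-up names, not the macro
code in the patch below; "dma" stands for PageDMA pages, i.e. the
low 16MB on x86):

#include <stdbool.h>
#include <stdio.h>

struct toypage { bool dma; bool free; };

/* want: 0 = non-DMA pages only, 1 = DMA pages only, 2 = anything */
static int find_free(struct toypage *p, int n, int want)
{
	int i;
	for (i = 0; i < n; i++) {
		if (!p[i].free)
			continue;
		if (want == 2 || p[i].dma == (want == 1))
			return i;
	}
	return -1;
}

static int toy_alloc(struct toypage *p, int n, bool need_dma)
{
	int i;
	if (need_dma)
		i = find_free(p, n, 1);		/* like RMQUEUE_DMA */
	else {
		i = find_free(p, n, 0);		/* pass 1: RMQUEUE_NODMA */
		if (i < 0)
			i = find_free(p, n, 2);	/* pass 2: RMQUEUE_ANY */
	}
	if (i >= 0)
		p[i].free = false;		/* claim the page */
	return i;
}

int main(void)
{
	struct toypage p[4] = {
		{ true, true }, { true, true }, { false, true }, { false, true }
	};
	printf("%d\n", toy_alloc(p, 4, false));	/* 2: non-DMA preferred */
	printf("%d\n", toy_alloc(p, 4, false));	/* 3 */
	printf("%d\n", toy_alloc(p, 4, false));	/* 0: falls back to a DMA page */
	printf("%d\n", toy_alloc(p, 4, true));	/* 1: DMA request */
	return 0;
}

The point of pass 1 is to keep DMA-capable pages in reserve for
callers that actually need them; only when non-DMA memory is
exhausted does an ordinary allocation dip into the DMA pool.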
Thanks.
--
H.J. Lu (hjl@gnu.org)
---
Index: mm/page_alloc.c
===================================================================
RCS file: /work/cvs/linux/linux/mm/page_alloc.c,v
retrieving revision 1.1.1.31
diff -u -p -r1.1.1.31 page_alloc.c
--- mm/page_alloc.c	1999/05/12 00:49:09	1.1.1.31
+++ mm/page_alloc.c	1999/07/06 22:23:58
@@ -155,12 +155,12 @@ void free_pages(unsigned long addr, unsi
 	change_bit((index) >> (1+(order)), (area)->map)
 #define CAN_DMA(x) (PageDMA(x))
 #define ADDRESS(x) (PAGE_OFFSET + ((x) << PAGE_SHIFT))
-#define RMQUEUE(order, gfp_mask) \
+#define RMQUEUE_DMA(order) \
 do { struct free_area_struct * area = free_area+order; \
      unsigned long new_order = order; \
 	do { struct page *prev = memory_head(area), *ret = prev->next; \
 	     while (memory_head(area) != ret) { \
-		if (!(gfp_mask & __GFP_DMA) || CAN_DMA(ret)) { \
+		if (CAN_DMA(ret)) { \
 			unsigned long map_nr; \
 			(prev->next = ret->next)->prev = prev; \
 			map_nr = ret - mem_map; \
@@ -176,6 +176,45 @@ do { struct free_area_struct * area = fr
 	     new_order++; area++; \
 	} while (new_order < NR_MEM_LISTS); \
 } while (0)
+#define RMQUEUE_NODMA(order) \
+do { struct free_area_struct * area = free_area+order; \
+     unsigned long new_order = order; \
+	do { struct page *prev = memory_head(area), *ret = prev->next; \
+	     while (memory_head(area) != ret) { \
+		if (!CAN_DMA(ret)) { \
+			unsigned long map_nr; \
+			(prev->next = ret->next)->prev = prev; \
+			map_nr = ret - mem_map; \
+			MARK_USED(map_nr, new_order, area); \
+			nr_free_pages -= 1 << order; \
+			EXPAND(ret, map_nr, order, new_order, area); \
+			spin_unlock_irqrestore(&page_alloc_lock, flags); \
+			return ADDRESS(map_nr); \
+		} \
+		prev = ret; \
+		ret = ret->next; \
+	     } \
+	     new_order++; area++; \
+	} while (new_order < NR_MEM_LISTS); \
+} while (0)
+#define RMQUEUE_ANY(order) \
+do { struct free_area_struct * area = free_area+order; \
+     unsigned long new_order = order; \
+	do { struct page *prev = memory_head(area), *ret = prev->next; \
+	     if (memory_head(area) != ret) { \
+		unsigned long map_nr; \
+		(prev->next = ret->next)->prev = prev; \
+		map_nr = ret - mem_map; \
+		MARK_USED(map_nr, new_order, area); \
+		nr_free_pages -= 1 << order; \
+		EXPAND(ret, map_nr, order, new_order, area); \
+		spin_unlock_irqrestore(&page_alloc_lock, flags); \
+		return ADDRESS(map_nr); \
+		\
+	     } \
+	     new_order++; area++; \
+	} while (new_order < NR_MEM_LISTS); \
+} while (0)
 
 #define EXPAND(map,index,low,high,area) \
 do { unsigned long size = 1 << high; \
@@ -236,7 +275,12 @@ unsigned long __get_free_pages(int gfp_m
 	}
 ok_to_allocate:
 	spin_lock_irqsave(&page_alloc_lock, flags);
-	RMQUEUE(order, gfp_mask);
+	if (gfp_mask & __GFP_DMA)
+		RMQUEUE_DMA(order);
+	else {
+		RMQUEUE_NODMA(order);
+		RMQUEUE_ANY(order);
+	}
 	spin_unlock_irqrestore(&page_alloc_lock, flags);
 
 	/*