A few weeks back I posted a cry for help with a PCI device driver which
maps hardware memory into user/kernel space using what used to be called
remap_page_range in the 2.6.9 kernel and now I'm trying to use
remap_pfn_range.
I'm still struggling with this and I'm hoping that there is an expert
out there who can point out what I'm doing wrong!!! My source code is
attached and I know I'm probably not doing this in the best way; but
it's the only way I know how, since I'm tasked with a job where I
have no idea what I'm doing!
When I try and access my device, which should be /dev/ibb[0-3], I get a
segmentation fault then a core dump and within a few seconds after that,
the system becomes very unstable and has to be rebooted (if it doesn't
lock up hard first).
/*
 * free_ibb_usr_shared - release the vmalloc()ed user-shared area.
 *
 * The previous version had three bugs, any one of which explains the
 * crash/instability after the device is accessed:
 *  1. vfree() was called once per PAGE_SIZE step, on interior addresses.
 *     vfree() must be called exactly once, on the base pointer returned
 *     by vmalloc().
 *  2. virt_to_page() was used on vmalloc addresses.  vmalloc memory is
 *     not in the kernel direct map, so virt_to_page() yields a bogus
 *     struct page; vmalloc_to_page() is the correct lookup.
 *  3. page->_count was forced to 1 with atomic_set(), corrupting the
 *     page refcounting that the mm subsystem maintains.
 */
static void
free_ibb_usr_shared(IbbSoftDev *ibb_sp)
{
	struct page *page;
	u_long virt_addr;
	u_long ushared_addr = (u_long)ibb_sp->ushared;

	if (ibb_sp->ushared == NULL)
		return;

	/*
	 * Undo the per-page SetPageReserved() done at allocation time so
	 * the pages can be freed normally when the area is released.
	 */
	for (virt_addr = ushared_addr;
	     virt_addr < ushared_addr + PAGE_ALIGN(IBB_SHARED_SIZE);
	     virt_addr += PAGE_SIZE) {
		page = vmalloc_to_page((void *)virt_addr);
		ClearPageReserved(page);
	}

	/* One vfree() of the base address releases the whole area. */
	vfree(ibb_sp->ushared);
	ibb_sp->ushared = NULL;
}
/*
 * alloc_ibb_usr_shared - allocate the area later mmap()ed to user space.
 *
 * NOTE(review): this function is truncated in the posted source — it never
 * reserves the backing pages, never checks the allocation for NULL, and
 * has no return statement or closing brace before the next function
 * begins.  The notes below flag problems visible in what remains.
 */
static int
alloc_ibb_usr_shared(IbbSoftDev *ibb_sp)
{
u_long virt_addr;
u_long ushared_addr;
int order = 0;
/* * From Linux Device Drivers - memory that is going to
* be mmaped must be PAGE_SIZE grained. Since we do mmap
* ushared to user space, we need to allocated it in
* PAGE_SIZE chunks.
*/
int size = PAGE_ALIGN(IBB_SHARED_SIZE);
dcmn_err(1, ("<1>" "ibb::size is %d\n", size));
dcmn_err(1, ("<1>" "ibb::order is %d\n", order));
/*
 * NOTE(review): off-by-one — the do/while increments order BEFORE the
 * test, so even when size <= PAGE_SIZE the loop exits with order == 1
 * (one order too high).  A while-loop testing before incrementing, or
 * get_order(size), would be correct.
 */
do {
dcmn_err(1, ("<1>" "ibb::size is %d\n", size));
dcmn_err(1, ("<1>" "ibb::order is %d\n", order));
order++;
} while (size > (PAGE_SIZE * (1 << order)));
/*
 * NOTE(review): the next two lines look like an email-mangled paste —
 * the commented-out __get_free_pages() call left a dangling "order);"
 * token on its own line, which will not compile as written.
 */
// ibb_sp->ushared = (IbbUserShared *) __get_free_pages(GFP_KERNEL,
order);
/*
 * NOTE(review): vmalloc() memory is virtually contiguous only, so it
 * cannot be handed to (io_)remap_pfn_range() as a single physical
 * range (as the mmap code later in this file does).  Either allocate
 * with __get_free_pages() (physically contiguous) or map the vmalloc
 * area page-by-page via vmalloc_to_page()/vm_insert_page().  The
 * hard-coded 4096*1024 also ignores IBB_SHARED_SIZE, and the result is
 * not checked for NULL.
 */
ibb_sp->ushared = (IbbUserShared *) vmalloc(4096 * 1024);
/*
 * free_ibb_image_table_mem - unreserve and release the image-table pages.
 *
 * NOTE(review): this function is also truncated/garbled in the posted
 * source — its tail (the commented-out __get_free_pages line, the bare
 * "order);" token, and the vmalloc assignment to ibb_sp->ushared) is a
 * mis-paste from alloc_ibb_usr_shared and cannot be what was intended
 * in a *free* routine.
 */
static void
free_ibb_image_table_mem(IbbSoftDev *ibb_sp)
{
uint32_t *virt_addr;
struct page *page;
dcmn_err(1, ("<1>" "free_ibb_image_table_mem(%d): Free size %d.\n",
ibb_sp->dev_num,ibb_sp->image_table_size));
/* unreserve all pages */
/*
 * NOTE(review): pointer-arithmetic bug — virt_addr is uint32_t *, so
 * "virt_addr += PAGE_SIZE" advances PAGE_SIZE * sizeof(uint32_t) bytes
 * (4x too far), and "image_table + image_table_size" is scaled the
 * same way.  Iterate over a u_long byte address instead.  Also,
 * virt_to_page() is only valid if image_table came from
 * kmalloc/__get_free_pages (direct-mapped memory), and forcing
 * page->_count with atomic_set() corrupts the mm refcounting.
 */
for( virt_addr = ibb_sp->image_table; virt_addr < ibb_sp->image_table + ibb_sp->image_table_size;
virt_addr += PAGE_SIZE)
{
page = virt_to_page(virt_addr);
ClearPageReserved(page);
atomic_set(&page->_count, 1);
// ibb_sp->image_table = (uint32_t *) __get_free_pages(GFP_KERNEL,
order);
ibb_sp->ushared = (IbbUserShared *) vmalloc(4096 * 1024);
if (ibb_sp->image_table != NULL && vsize <=
ibb_sp->image_table_size) {
const u_long start = vma->vm_start;
const u_long size = (vma->vm_end - vma->vm_start);
const u_long page = (void *)ibb_sp->image_table;
if (io_remap_pfn_range(vma, start, page, size,
vma->vm_page_prot)) {
dcmn_err(1, ("<1>" "ibb_mmap(%d): image table remap
failed\n",
ibb_sp->dev_num));
return(-EAGAIN);
}
}
else {
dcmn_err(1, ("<1>" "ibb_mmap(%d): image table params failed vs %d is %d\n",
ibb_sp->dev_num, (int)vsize,
(int)ibb_sp->image_table_size));
return(-EAGAIN);
}
}
else if (offset == IBB_SHARED_ADDR) {
if (ibb_sp->ushared != NULL && vsize <=
PAGE_ALIGN(IBB_SHARED_SIZE)) {
const u_long start = vma->vm_start;
const u_long size = (vma->vm_end - vma->vm_start);
const u_long page = (void *)ibb_sp->ushared;
if (io_remap_pfn_range(vma, start, page, size,
vma->vm_page_prot)) {
dcmn_err(1, ("<1>" "ibb_mmap(%d): ushared remap
failed\n",
ibb_sp->dev_num));
return(-EAGAIN);
}
dcmn_err(1, ("<1>" "ibb_mmap(%d): \
ushared remap: ibb_sp->ushared: %lx vma->vm_start:
%lx\n",
ibb_sp->dev_num,
(u_long)ibb_sp->ushared,
(u_long)vma->vm_start) );
}
else {
dcmn_err(1, ("<1>" "ibb_mmap(%d): ushared mmap failed\n",
ibb_sp->dev_num));
return(-EAGAIN);
}
}
/* * vsize is PAGE_SIZE at minimum. For registers and other
* small memory areas we need to check for VSIZE. For large
* chunks, vsize should equal the chunk size */
else if (offset == IBB_CONTROL_OFF && vsize == PAGE_SIZE) {
const u_long start = vma->vm_start;
const u_long size = (vma->vm_end - vma->vm_start);
const u_long page = ibb_sp->creg_phys;
if (remap_pfn_range(vma, start, page, size, vma->vm_page_prot))