[PATCH v4 3/3] drm/nouveau/fb/nv50: defer DMA mapping of scratch page to init() hook
From: Ard Biesheuvel
Date: Mon Sep 26 2016 - 08:33:10 EST
The 100c08 scratch page is mapped using dma_map_page() before the TTM
layer has had a chance to set the DMA mask. This means we are still
running with the default mask of 32 bits when this code executes, which
causes problems for platforms with no memory below 4 GB (such as AMD
Seattle).
So move the dma_map_page() call to the .init() hook, which executes
after the DMA mask has been set.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
---
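Not part of the commit message, but for context: a minimal sketch of the
ordering problem being avoided here. The example_probe() helper and the
40-bit mask below are purely illustrative assumptions, not the real
nouveau/TTM call chain:

  #include <linux/dma-mapping.h>
  #include <linux/mm.h>

  /*
   * Illustrative only: the mapping is taken while the device still
   * carries the default 32-bit DMA mask, so it must land below 4 GB
   * and fails on systems (e.g. AMD Seattle) with no memory there.
   */
  static int example_probe(struct device *dev, struct page *scratch,
                           dma_addr_t *addr)
  {
          /* default mask in effect: mapping constrained to < 4 GB */
          *addr = dma_map_page(dev, scratch, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
          if (dma_mapping_error(dev, *addr))
                  return -EFAULT;

          /* the mask is only widened afterwards, e.g. to 40 bits */
          return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
  }

Deferring the dma_map_page() call to .init() flips that ordering, so the
scratch page is mapped only after the mask has been widened.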
drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c | 30 +++++++++++++-------
1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index 1b5fb02eab2a..f029aaf01831 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -216,11 +216,23 @@ nv50_fb_init(struct nvkm_fb *base)
 	struct nv50_fb *fb = nv50_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
 
+	if (!fb->r100c08) {
+		dma_addr_t addr = dma_map_page(device->dev, fb->r100c08_page, 0,
+					       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (!dma_mapping_error(device->dev, addr)) {
+			fb->r100c08 = addr;
+		} else {
+			nvkm_warn(&fb->base.subdev,
+				  "dma_map_page() failed on 100c08 page\n");
+		}
+	}
+
 	/* Not a clue what this is exactly. Without pointing it at a
 	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
 	 * cause IOMMU "read from address 0" errors (rh#561267)
 	 */
-	nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
+	if (fb->r100c08)
+		nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
@@ -233,11 +245,11 @@ nv50_fb_dtor(struct nvkm_fb *base)
 	struct nv50_fb *fb = nv50_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (fb->r100c08_page) {
+	if (fb->r100c08)
 		dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
 			       DMA_BIDIRECTIONAL);
-		__free_page(fb->r100c08_page);
-	}
+
+	__free_page(fb->r100c08_page);
 
 	return fb;
 }
@@ -264,13 +276,9 @@ nv50_fb_new_(const struct nv50_fb_func *func, struct nvkm_device *device,
 	*pfb = &fb->base;
 
 	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c08_page) {
-		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c08))
-			return -EFAULT;
-	} else {
-		nvkm_warn(&fb->base.subdev, "failed 100c08 page alloc\n");
+	if (!fb->r100c08_page) {
+		nvkm_error(&fb->base.subdev, "failed 100c08 page alloc\n");
+		return -ENOMEM;
 	}
 
 	return 0;
--
2.7.4