From c39d35161e87f1d7c0628af6907ac66a8c77f63f Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk
Date: Thu, 2 Dec 2010 11:04:29 -0500
Subject: radeon/ttm/PCIe: Use dma_addr if TTM has set it.

If the TTM layer has used the DMA API to set up pages that are
TTM_PAGE_FLAG_DMA32 (see the patch titled "ttm: Utilize the dma_addr_t
array for pages that are to in DMA32 pool."), let's use it when
programming the GART in the PCIe type cards.

This patch skips doing the pci_map_page (and pci_unmap_page) if
there is a DMA address passed in for that page. If the dma_address
is zero (or DMA_ERROR_CODE), then we continue on with our old
behaviour.

[v2: Fixed an indentation problem, added reviewed-by tag]
[v3: Added Acked-by Jerome]

Acked-by: Jerome Glisse
Reviewed-by: Thomas Hellstrom
Signed-off-by: Konrad Rzeszutek Wilk
Tested-by: Ian Campbell
---
 drivers/gpu/drm/radeon/radeon_gart.c | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)

(limited to 'drivers/gpu/drm/radeon/radeon_gart.c')

diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e65b903..5214bc2 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
-			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+			if (!rdev->gart.ttm_alloced[p])
+				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 }
 
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
-		     int pages, struct page **pagelist)
+		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
 {
 	unsigned t;
 	unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		/* we need to support large memory configurations */
-		/* assume that unbind have already been call on the range */
-		rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+		/* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+		 * is requested. */
+		if (dma_addr[i] != DMA_ERROR_CODE) {
+			rdev->gart.ttm_alloced[p] = true;
+			rdev->gart.pages_addr[p] = dma_addr[i];
+		} else {
+			/* we need to support large memory configurations */
+			/* assume that unbind have already been call on the range */
+			rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
 							0, PAGE_SIZE,
 							PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-			/* FIXME: failed to map page (return -ENOMEM?) */
-			radeon_gart_unbind(rdev, offset, pages);
-			return -ENOMEM;
+			if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+				/* FIXME: failed to map page (return -ENOMEM?) */
+				radeon_gart_unbind(rdev, offset, pages);
+				return -ENOMEM;
+			}
 		}
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+					 rdev->gart.num_cpu_pages, GFP_KERNEL);
+	if (rdev->gart.ttm_alloced == NULL) {
+		radeon_gart_fini(rdev);
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
 	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
 		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	rdev->gart.ready = false;
 	kfree(rdev->gart.pages);
 	kfree(rdev->gart.pages_addr);
+	kfree(rdev->gart.ttm_alloced);
 	rdev->gart.pages = NULL;
 	rdev->gart.pages_addr = NULL;
+	rdev->gart.ttm_alloced = NULL;
 }
--
cgit v1.1
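
For readers tracking the interface change, here is a minimal caller-side sketch of how the new dma_addr_t argument to radeon_gart_bind() is meant to be fed. It is not part of this patch: the real caller update lives in the radeon TTM backend code, and example_bind_pages() is a hypothetical helper used only for illustration. The convention it assumes is the one described above: an entry holds the bus address the TTM DMA32 pool already set up for that page, or DMA_ERROR_CODE (zero on x86 at this point) when no such mapping exists, in which case radeon_gart_bind() falls back to pci_map_page().

/* Hypothetical caller-side sketch, assuming the radeon_gart_bind()
 * signature introduced by this patch.  Not taken from the kernel tree. */
#include <linux/dma-mapping.h>
#include "radeon.h"

static int example_bind_pages(struct radeon_device *rdev, unsigned offset,
			      int npages, struct page **pagelist,
			      dma_addr_t *dma_addr)
{
	int i;

	/* Pages the TTM DMA32 pool did not map carry no usable address;
	 * mark them so the GART code takes the pci_map_page() path. */
	for (i = 0; i < npages; i++) {
		if (!dma_addr[i])
			dma_addr[i] = DMA_ERROR_CODE;
	}

	/* The page list and the DMA address array now travel together. */
	return radeon_gart_bind(rdev, offset, npages, pagelist, dma_addr);
}

The per-page ttm_alloced[] array added by the patch records which GART slots were filled from such pre-mapped addresses, so that radeon_gart_unbind() knows not to pci_unmap_page() mappings the driver never created itself.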