Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile              |    4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c     |  109
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              |  197
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c         |   32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c           |   20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c          |   14
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c          |    5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c      |  245
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  | 1134
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c              |  331
10 files changed, 548 insertions(+), 1543 deletions(-)
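
For orientation before the hunks: this series moves the drivers back to the split ttm_backend interface. Reconstructed purely from the call sites and signatures visible in the diff below, the restored vtable looks roughly like this sketch (the real header may carry additional members that are not visible here):

	struct ttm_backend {
		struct ttm_bo_device *bdev;
		struct ttm_backend_func *func;
		/* other members, if any, are not visible in this diff */
	};

	struct ttm_backend_func {
		/* Attach an array of pages (and optional DMA addresses). */
		int  (*populate)(struct ttm_backend *backend,
				 unsigned long num_pages, struct page **pages,
				 struct page *dummy_read_page,
				 dma_addr_t *dma_addrs);
		/* Detach and release whatever populate attached. */
		void (*clear)(struct ttm_backend *backend);
		/* Bind/unbind the attached pages to a GPU aperture range. */
		int  (*bind)(struct ttm_backend *backend,
			     struct ttm_mem_reg *bo_mem);
		int  (*unbind)(struct ttm_backend *backend);
		/* Free the backend object itself. */
		void (*destroy)(struct ttm_backend *backend);
	};
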
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b2b33dd..f3cf6f0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -7,8 +7,4 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
ttm_bo_manager.o
-ifeq ($(CONFIG_SWIOTLB),y)
-ttm-y += ttm_page_alloc_dma.o
-endif
-
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4a87282..1c4a72f 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -29,11 +29,8 @@
* Keith Packard.
*/
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
-#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include "ttm/ttm_placement.h"
#include <linux/agp_backend.h>
@@ -43,77 +40,100 @@
#include <asm/agp.h>
struct ttm_agp_backend {
- struct ttm_tt ttm;
+ struct ttm_backend backend;
struct agp_memory *mem;
struct agp_bridge_data *bridge;
};
-static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+static int ttm_agp_populate(struct ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
- struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct drm_mm_node *node = bo_mem->mm_node;
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct page **cur_page, **last_page = pages + num_pages;
struct agp_memory *mem;
- int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
- unsigned i;
- mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
if (unlikely(mem == NULL))
return -ENOMEM;
mem->page_count = 0;
- for (i = 0; i < ttm->num_pages; i++) {
- struct page *page = ttm->pages[i];
-
+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
+ struct page *page = *cur_page;
if (!page)
- page = ttm->dummy_read_page;
+ page = dummy_read_page;
mem->pages[mem->page_count++] = page;
}
agp_be->mem = mem;
+ return 0;
+}
+
+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct drm_mm_node *node = bo_mem->mm_node;
+ struct agp_memory *mem = agp_be->mem;
+ int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ int ret;
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
ret = agp_bind_memory(mem, node->start);
if (ret)
- pr_err("AGP Bind memory failed\n");
+ printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
return ret;
}
-static int ttm_agp_unbind(struct ttm_tt *ttm)
+static int ttm_agp_unbind(struct ttm_backend *backend)
{
- struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ else
+ return 0;
+}
- if (agp_be->mem) {
- if (agp_be->mem->is_bound)
- return agp_unbind_memory(agp_be->mem);
- agp_free_memory(agp_be->mem);
- agp_be->mem = NULL;
+static void ttm_agp_clear(struct ttm_backend *backend)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct agp_memory *mem = agp_be->mem;
+
+ if (mem) {
+ ttm_agp_unbind(backend);
+ agp_free_memory(mem);
}
- return 0;
+ agp_be->mem = NULL;
}
-static void ttm_agp_destroy(struct ttm_tt *ttm)
+static void ttm_agp_destroy(struct ttm_backend *backend)
{
- struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
if (agp_be->mem)
- ttm_agp_unbind(ttm);
- ttm_tt_fini(ttm);
+ ttm_agp_clear(backend);
kfree(agp_be);
}
static struct ttm_backend_func ttm_agp_func = {
+ .populate = ttm_agp_populate,
+ .clear = ttm_agp_clear,
.bind = ttm_agp_bind,
.unbind = ttm_agp_unbind,
.destroy = ttm_agp_destroy,
};
-struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge)
{
struct ttm_agp_backend *agp_be;
@@ -123,29 +143,10 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- agp_be->ttm.func = &ttm_agp_func;
-
- if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
- return NULL;
- }
-
- return &agp_be->ttm;
-}
-EXPORT_SYMBOL(ttm_agp_tt_create);
-
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
-{
- if (ttm->state != tt_unpopulated)
- return 0;
-
- return ttm_pool_populate(ttm);
-}
-EXPORT_SYMBOL(ttm_agp_tt_populate);
-
-void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
-{
- ttm_pool_unpopulate(ttm);
+ agp_be->backend.func = &ttm_agp_func;
+ agp_be->backend.bdev = bdev;
+ return &agp_be->backend;
}
-EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
+EXPORT_SYMBOL(ttm_agp_backend_init);
#endif
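
As a usage sketch (not part of the patch), the expected lifecycle of the AGP backend restored above would be roughly the following; bdev, bridge, num_pages, pages, dummy_read_page and bo_mem are assumed to be supplied by the caller:

	struct ttm_backend *be = ttm_agp_backend_init(bdev, bridge);
	int ret;

	if (!be)
		return -ENOMEM;

	/* Attach the page array, then bind it into the AGP aperture. */
	ret = be->func->populate(be, num_pages, pages, dummy_read_page, NULL);
	if (ret)
		goto out_destroy;
	ret = be->func->bind(be, bo_mem);
	if (ret)
		goto out_clear;

	/* ... buffer is usable by the device here ... */

	be->func->unbind(be);
out_clear:
	be->func->clear(be);	/* frees the agp_memory, keeps the backend */
out_destroy:
	be->func->destroy(be);	/* kfree()s the ttm_agp_backend */

Note that ttm_agp_bind() in the restored code dereferences agp_be->mem, so populate must have succeeded before bind is called, as the sketch assumes.
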
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1843418..22a89cd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -28,8 +28,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
@@ -70,13 +68,15 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- pr_err(" has_type: %d\n", man->has_type);
- pr_err(" use_type: %d\n", man->use_type);
- pr_err(" flags: 0x%08X\n", man->flags);
- pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
- pr_err(" size: %llu\n", man->size);
- pr_err(" available_caching: 0x%08X\n", man->available_caching);
- pr_err(" default_caching: 0x%08X\n", man->default_caching);
+ printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
+ printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
+ printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
+ printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
+ printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
+ printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
+ man->available_caching);
+ printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
+ man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
(*man->func->debug)(man, TTM_PFX);
}
@@ -86,16 +86,16 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
{
int i, ret, mem_type;
- pr_err("No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return;
- pr_err(" placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i], mem_type);
+ printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i], mem_type);
ttm_mem_type_debug(bo->bdev, mem_type);
}
}
@@ -137,7 +137,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
struct ttm_bo_device *bdev = bo->bdev;
- size_t acc_size = bo->acc_size;
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
@@ -153,9 +152,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->destroy)
bo->destroy(bo);
else {
+ ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
kfree(bo);
}
- ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -338,13 +337,29 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
if (zero_alloc)
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
case ttm_bo_type_kernel:
- bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags, glob->dummy_read_page);
+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, glob->dummy_read_page);
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
+ case ttm_bo_type_user:
+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_USER,
+ glob->dummy_read_page);
+ if (unlikely(bo->ttm == NULL)) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = ttm_tt_set_user(bo->ttm, current,
+ bo->buffer_start, bo->num_pages);
+ if (unlikely(ret != 0)) {
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+ break;
default:
- pr_err("Illegal buffer object type\n");
+ printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
ret = -EINVAL;
break;
}
@@ -416,23 +431,14 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
else
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
- if (ret) {
- if (bdev->driver->move_notify) {
- struct ttm_mem_reg tmp_mem = *mem;
- *mem = bo->mem;
- bo->mem = tmp_mem;
- bdev->driver->move_notify(bo, mem);
- bo->mem = *mem;
- }
-
+ if (ret)
goto out_err;
- }
moved:
if (bo->evicted) {
ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
if (ret)
- pr_err("Can not flush read caches\n");
+ printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
bo->evicted = false;
}
@@ -466,9 +472,6 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
- if (bo->bdev->driver->move_notify)
- bo->bdev->driver->move_notify(bo, NULL);
-
if (bo->ttm) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
@@ -734,7 +737,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
- pr_err("Failed to expire sync object before buffer eviction\n");
+ printk(KERN_ERR TTM_PFX
+ "Failed to expire sync object before "
+ "buffer eviction.\n");
}
goto out;
}
@@ -755,8 +760,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
- pr_err("Failed to find memory space for buffer 0x%p eviction\n",
- bo);
+ printk(KERN_ERR TTM_PFX
+ "Failed to find memory space for "
+ "buffer 0x%p eviction.\n", bo);
ttm_bo_mem_space_debug(bo, &placement);
}
goto out;
@@ -766,7 +772,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
- pr_err("Buffer eviction failed\n");
+ printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
ttm_bo_mem_put(bo, &evict_mem);
goto out;
}
@@ -907,12 +913,16 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
}
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+ bool disallow_fixed,
uint32_t mem_type,
uint32_t proposed_placement,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
+ return false;
+
if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
return false;
@@ -957,6 +967,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
+ bo->type == ttm_bo_type_user,
mem_type,
placement->placement[i],
&cur_flags);
@@ -1004,6 +1015,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!man->has_type)
continue;
if (!ttm_bo_mt_compatible(man,
+ bo->type == ttm_bo_type_user,
mem_type,
placement->busy_placement[i],
&cur_flags))
@@ -1089,24 +1101,32 @@ out_unlock:
return ret;
}
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
if (mem->mm_node && placement->lpfn != 0 &&
(mem->start < placement->fpfn ||
mem->start + mem->num_pages > placement->lpfn))
- return -1;
+ return false;
for (i = 0; i < placement->num_placement; i++) {
- if ((placement->placement[i] & mem->placement &
- TTM_PL_MASK_CACHING) &&
- (placement->placement[i] & mem->placement &
- TTM_PL_MASK_MEM))
- return i;
+ *new_flags = placement->placement[i];
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
+ }
+
+ for (i = 0; i < placement->num_busy_placement; i++) {
+ *new_flags = placement->busy_placement[i];
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
}
- return -1;
+
+ return false;
}
int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1115,6 +1135,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
int ret;
+ uint32_t new_flags;
BUG_ON(!atomic_read(&bo->reserved));
/* Check that range is valid */
@@ -1125,8 +1146,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- ret = ttm_bo_mem_compat(placement, &bo->mem);
- if (ret < 0) {
+ if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@@ -1135,7 +1155,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the compatible memory placement flags to the active flags
*/
- ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ttm_flag_masked(&bo->mem.placement, new_flags,
~TTM_PL_MASK_MEMTYPE);
}
/*
@@ -1173,22 +1193,11 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
{
int ret = 0;
unsigned long num_pages;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
-
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (ret) {
- pr_err("Out of kernel memory\n");
- if (destroy)
- (*destroy)(bo);
- else
- kfree(bo);
- return -ENOMEM;
- }
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
- pr_err("Illegal buffer object size\n");
+ printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
if (destroy)
(*destroy)(bo);
else
@@ -1254,34 +1263,14 @@ out_err:
}
EXPORT_SYMBOL(ttm_bo_init);
-size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
+static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
+ unsigned long num_pages)
{
- unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
- size_t size = 0;
+ size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
+ PAGE_MASK;
- size += ttm_round_pot(struct_size);
- size += PAGE_ALIGN(npages * sizeof(void *));
- size += ttm_round_pot(sizeof(struct ttm_tt));
- return size;
+ return glob->ttm_bo_size + 2 * page_array_size;
}
-EXPORT_SYMBOL(ttm_bo_acc_size);
-
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
-{
- unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
- size_t size = 0;
-
- size += ttm_round_pot(struct_size);
- size += PAGE_ALIGN(npages * sizeof(void *));
- size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
- size += ttm_round_pot(sizeof(struct ttm_dma_tt));
- return size;
-}
-EXPORT_SYMBOL(ttm_bo_dma_acc_size);
int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
@@ -1295,10 +1284,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
{
struct ttm_buffer_object *bo;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
- size_t acc_size;
int ret;
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
+ size_t acc_size =
+ ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0))
return ret;
@@ -1339,7 +1328,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
if (allow_errors) {
return ret;
} else {
- pr_err("Cleanup eviction failed\n");
+ printk(KERN_ERR TTM_PFX
+ "Cleanup eviction failed\n");
}
}
spin_lock(&glob->lru_lock);
@@ -1354,14 +1344,14 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
int ret = -EINVAL;
if (mem_type >= TTM_NUM_MEM_TYPES) {
- pr_err("Illegal memory type %d\n", mem_type);
+ printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
return ret;
}
man = &bdev->man[mem_type];
if (!man->has_type) {
- pr_err("Trying to take down uninitialized memory manager type %u\n",
- mem_type);
+ printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
+ "memory manager type %u\n", mem_type);
return ret;
}
@@ -1384,12 +1374,16 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
- pr_err("Illegal memory manager memory type %u\n", mem_type);
+ printk(KERN_ERR TTM_PFX
+ "Illegal memory manager memory type %u.\n",
+ mem_type);
return -EINVAL;
}
if (!man->has_type) {
- pr_err("Memory type %u has not been initialized\n", mem_type);
+ printk(KERN_ERR TTM_PFX
+ "Memory type %u has not been initialized.\n",
+ mem_type);
return 0;
}
@@ -1474,10 +1468,18 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
if (unlikely(ret != 0)) {
- pr_err("Could not register buffer object swapout\n");
+ printk(KERN_ERR TTM_PFX
+ "Could not register buffer object swapout.\n");
goto out_no_shrink;
}
+ glob->ttm_bo_extra_size =
+ ttm_round_pot(sizeof(struct ttm_tt)) +
+ ttm_round_pot(sizeof(struct ttm_backend));
+
+ glob->ttm_bo_size = glob->ttm_bo_extra_size +
+ ttm_round_pot(sizeof(struct ttm_buffer_object));
+
atomic_set(&glob->bo_count, 0);
ret = kobject_init_and_add(
@@ -1507,8 +1509,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
man->use_type = false;
if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
ret = -EBUSY;
- pr_err("DRM memory manager type %d is not clean\n",
- i);
+ printk(KERN_ERR TTM_PFX
+ "DRM memory manager type %d "
+ "is not clean.\n", i);
}
man->has_type = false;
}
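
A worked example of the size accounting restored in ttm_bo_size() above, assuming 4 KiB pages and 64-bit pointers (both assumptions, not stated by the patch):

	/*
	 * For a 1 MiB buffer object: num_pages = 256, so
	 *   page_array_size = (256 * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK
	 *                   = (2048 + 4095) & ~4095 = 4096
	 * and the amount charged to the memory accounting global is
	 *   acc_size = glob->ttm_bo_size + 2 * 4096
	 * where glob->ttm_bo_size already includes the rounded sizes of
	 * struct ttm_buffer_object, struct ttm_tt and struct ttm_backend
	 * (see the ttm_bo_global_init() hunk above).
	 */
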
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index f8187ea..082fcae 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
{
- struct page *d = ttm->pages[page];
+ struct page *d = ttm_tt_get_page(ttm, page);
void *dst;
if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
pgprot_t prot)
{
- struct page *s = ttm->pages[page];
+ struct page *s = ttm_tt_get_page(ttm, page);
void *src;
if (!s)
@@ -342,12 +342,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (old_iomap == NULL && ttm == NULL)
goto out2;
- if (ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
- goto out1;
- }
-
add = 0;
dir = 1;
@@ -445,7 +439,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
- fbo->acc_size = 0;
*new_obj = fbo;
return 0;
@@ -509,16 +502,10 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
{
struct ttm_mem_reg *mem = &bo->mem;
pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
- int ret;
+ struct page *d;
+ int i;
BUG_ON(!ttm);
-
- if (ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret)
- return ret;
- }
-
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
@@ -526,9 +513,18 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
*/
map->bo_kmap_type = ttm_bo_map_kmap;
- map->page = ttm->pages[start_page];
+ map->page = ttm_tt_get_page(ttm, start_page);
map->virtual = kmap(map->page);
} else {
+ /*
+ * Populate the part we're mapping;
+ */
+ for (i = start_page; i < start_page + num_pages; ++i) {
+ d = ttm_tt_get_page(ttm, i);
+ if (!d)
+ return -ENOMEM;
+ }
+
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
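
With the populate-on-demand change above, kmap callers are unchanged; a minimal, hypothetical usage sketch of the mapping API this hunk touches:

	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);	/* faults pages in */
	if (ret)
		return ret;
	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (!is_iomem)	/* system pages; iomem would need memset_io() */
		memset(virtual, 0, bo->num_pages << PAGE_SHIFT);
	ttm_bo_kunmap(&map);
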
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a877813..e223175 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -28,8 +28,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
@@ -146,9 +144,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- bo->vm_node->start - vma->vm_pgoff;
- page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
- bo->vm_node->start - vma->vm_pgoff;
+ vma->vm_pgoff - bo->vm_node->start;
+ page_last = vma_pages(vma) + vma->vm_pgoff -
+ bo->vm_node->start;
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
@@ -176,23 +174,18 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
- retval = VM_FAULT_OOM;
- goto out_io_unlock;
- }
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
+
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
- page = ttm->pages[page_offset];
+ page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
goto out_io_unlock;
@@ -264,7 +257,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
read_unlock(&bdev->vm_lock);
if (unlikely(bo == NULL)) {
- pr_err("Could not find buffer object to map\n");
+ printk(KERN_ERR TTM_PFX
+ "Could not find buffer object to map.\n");
return -EINVAL;
}
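
A worked example of the fault-offset arithmetic in the hunk above (illustrative numbers, not from the patch):

	/*
	 * If the mapping starts three pages into the object, i.e.
	 *   vma->vm_pgoff == bo->vm_node->start + 3,
	 * and the faulting address lies two pages past vma->vm_start, then
	 *   page_offset = 2 + (start + 3) - start = 5,
	 * the sixth page of the BO. page_last is the same expression with
	 * vma_pages(vma) in place of the faulting page index.
	 */
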
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 23d2ecb..e70ddd8 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -25,8 +25,6 @@
*
**************************************************************************/
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_page_alloc.h"
@@ -76,8 +74,9 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj)
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
- pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
- zone->name, (unsigned long long)zone->used_mem >> 10);
+ printk(KERN_INFO TTM_PFX
+ "Zone %7s: Used memory at exit: %llu kiB.\n",
+ zone->name, (unsigned long long) zone->used_mem >> 10);
kfree(zone);
}
@@ -391,11 +390,11 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
#endif
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
- zone->name, (unsigned long long)zone->max_mem >> 10);
+ printk(KERN_INFO TTM_PFX
+ "Zone %7s: Available graphics memory: %llu kiB.\n",
+ zone->name, (unsigned long long) zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
- ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -410,7 +409,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
- ttm_dma_page_alloc_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 68daca4..93577f2 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -49,8 +49,6 @@
* for fast lookup of ref objects given a base object.
*/
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"
#include <linux/list.h>
@@ -234,7 +232,8 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
return NULL;
if (tfile != base->tfile && !base->shareable) {
- pr_err("Attempted access of non-shareable object\n");
+ printk(KERN_ERR TTM_PFX
+ "Attempted access of non-shareable object.\n");
ttm_base_object_unref(&base);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index ebc6fac..508c64c 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -30,9 +30,6 @@
* - Use page->lru to keep a free list
* - doesn't track currently in use pages
*/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
@@ -170,13 +167,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
m->options.small = val;
else if (attr == &ttm_page_pool_alloc_size) {
if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
return size;
} else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
}
m->options.alloc_size = val;
}
@@ -277,7 +279,8 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
{
unsigned i;
if (set_pages_array_wb(pages, npages))
- pr_err("Failed to set %d pages to wb!\n", npages);
+ printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
+ npages);
for (i = 0; i < npages; ++i)
__free_page(pages[i]);
}
@@ -312,7 +315,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
GFP_KERNEL);
if (!pages_to_free) {
- pr_err("Failed to allocate memory for pool free operation\n");
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate memory for pool free operation.\n");
return 0;
}
@@ -394,13 +398,18 @@ static int ttm_pool_get_num_unused_pages(void)
static int ttm_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(lock);
+ static unsigned start_pool;
unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
- pool_offset = pool_offset % NUM_POOLS;
+ if (shrink_pages == 0)
+ goto out;
+ if (!mutex_trylock(&lock))
+ return -1;
+ pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
@@ -409,6 +418,8 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
}
+ mutex_unlock(&lock);
+out:
/* return estimated number of unused pages in pool */
return ttm_pool_get_num_unused_pages();
}
@@ -434,12 +445,16 @@ static int ttm_set_pages_caching(struct page **pages,
case tt_uncached:
r = set_pages_array_uc(pages, cpages);
if (r)
- pr_err("Failed to set %d pages to uc!\n", cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to uc!\n",
+ cpages);
break;
case tt_wc:
r = set_pages_array_wc(pages, cpages);
if (r)
- pr_err("Failed to set %d pages to wc!\n", cpages);
+ printk(KERN_ERR TTM_PFX
+ "Failed to set %d pages to wc!\n",
+ cpages);
break;
default:
break;
@@ -484,7 +499,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
if (!caching_array) {
- pr_err("Unable to allocate table for new pages\n");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate table for new pages.");
return -ENOMEM;
}
@@ -492,7 +508,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
p = alloc_page(gfp_flags);
if (!p) {
- pr_err("Unable to get page %u\n", i);
+ printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
/* store already allocated pages in the pool after
* setting the caching state */
@@ -590,7 +606,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
++pool->nrefills;
pool->npages += alloc_size;
} else {
- pr_err("Failed to fill pool (%p)\n", pool);
+ printk(KERN_ERR TTM_PFX
+ "Failed to fill pool (%p).", pool);
/* If we have any pages left put them to the pool. */
list_for_each_entry(p, &pool->list, lru) {
++cpages;
@@ -609,10 +626,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages,
- int ttm_flags,
- enum ttm_caching_state cstate,
- unsigned count)
+ struct list_head *pages, int ttm_flags,
+ enum ttm_caching_state cstate, unsigned count)
{
unsigned long irq_flags;
struct list_head *p;
@@ -652,63 +667,17 @@ out:
return count;
}
-/* Put all pages in pages list to correct pool to wait for reuse */
-static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
-{
- unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- unsigned i;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- __free_page(pages[i]);
- pages[i] = NULL;
- }
- }
- return;
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- for (i = 0; i < npages; i++) {
- if (pages[i]) {
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
- list_add_tail(&pages[i]->lru, &pool->list);
- pages[i] = NULL;
- pool->npages++;
- }
- }
- /* Check that we don't go over the pool limit */
- npages = 0;
- if (pool->npages > _manager->options.max_size) {
- npages = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (npages)
- ttm_page_pool_free(pool, npages);
-}
-
/*
* On success pages list will hold count number of correctly
* cached pages.
*/
-static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
+int ttm_get_pages(struct list_head *pages, int flags,
+ enum ttm_caching_state cstate, unsigned count,
+ dma_addr_t *dma_address)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- struct list_head plist;
struct page *p = NULL;
gfp_t gfp_flags = GFP_USER;
- unsigned count;
int r;
/* set zero flag for page allocation if required */
@@ -722,59 +691,99 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < npages; ++r) {
+ for (r = 0; r < count; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
- pr_err("Unable to allocate page\n");
+ printk(KERN_ERR TTM_PFX
+ "Unable to allocate page.");
return -ENOMEM;
}
- pages[r] = p;
+ list_add(&p->lru, pages);
}
return 0;
}
+
/* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags;
/* First we take pages from the pool */
- INIT_LIST_HEAD(&plist);
- npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
- count = 0;
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
+ count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- list_for_each_entry(p, &plist, lru) {
- clear_page(page_address(p));
+ list_for_each_entry(p, pages, lru) {
+ if (PageHighMem(p))
+ clear_highpage(p);
+ else
+ clear_page(page_address(p));
}
}
/* If pool didn't have enough pages allocate new one. */
- if (npages > 0) {
+ if (count > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
- INIT_LIST_HEAD(&plist);
- r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
- list_for_each_entry(p, &plist, lru) {
- pages[count++] = p;
- }
+ r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
if (r) {
/* If there are any pages in the list, put them back to
* the pool. */
- pr_err("Failed to allocate extra pages for large request\n");
- ttm_put_pages(pages, count, flags, cstate);
+ printk(KERN_ERR TTM_PFX
+ "Failed to allocate extra pages "
+ "for large request.");
+ ttm_put_pages(pages, 0, flags, cstate, NULL);
return r;
}
}
+
return 0;
}
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+ enum ttm_caching_state cstate, dma_addr_t *dma_address)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct page *p, *tmp;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ __free_page(p);
+ }
+ /* Make the pages list empty */
+ INIT_LIST_HEAD(pages);
+ return;
+ }
+ if (page_count == 0) {
+ list_for_each_entry_safe(p, tmp, pages, lru) {
+ ++page_count;
+ }
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ list_splice_init(pages, &pool->list);
+ pool->npages += page_count;
+ /* Check that we don't go over the pool limit */
+ page_count = 0;
+ if (pool->npages > _manager->options.max_size) {
+ page_count = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (page_count < NUM_PAGES_TO_ALLOC)
+ page_count = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (page_count)
+ ttm_page_pool_free(pool, page_count);
+}
+
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
@@ -792,7 +801,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
WARN_ON(_manager);
- pr_info("Initializing pool allocator\n");
+ printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
@@ -827,7 +836,7 @@ void ttm_page_alloc_fini(void)
{
int i;
- pr_info("Finalizing pool allocator\n");
+ printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
@@ -837,62 +846,6 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
-int ttm_pool_populate(struct ttm_tt *ttm)
-{
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- unsigned i;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_get_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- if (ret != 0) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return -ENOMEM;
- }
- }
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return ret;
- }
- }
-
- ttm->state = tt_unbound;
- return 0;
-}
-EXPORT_SYMBOL(ttm_pool_populate);
-
-void ttm_pool_unpopulate(struct ttm_tt *ttm)
-{
- unsigned i;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- if (ttm->pages[i]) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- ttm_put_pages(&ttm->pages[i], 1,
- ttm->page_flags,
- ttm->caching_state);
- }
- }
- ttm->state = tt_unpopulated;
-}
-EXPORT_SYMBOL(ttm_pool_unpopulate);
-
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
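
The hunks above restore the list-based pool entry points; a hedged sketch of how a backend would call them (the flag and caching values are illustrative):

	LIST_HEAD(pages);
	int ret;

	/* Ask the pool for 16 cached, zeroed pages; the dma_address
	 * argument is unused by this non-DMA allocator. */
	ret = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached,
			    16, NULL);
	if (ret)
		return ret;

	/* ... use the pages ... */

	/* Return them; a page_count of 0 makes the pool count the list. */
	ttm_put_pages(&pages, 0, TTM_PAGE_FLAG_ZERO_ALLOC, tt_cached, NULL);
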
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
deleted file mode 100644
index 4f9e548..0000000
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ /dev/null
@@ -1,1134 +0,0 @@
-/*
- * Copyright 2011 (c) Oracle Corp.
-
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
- */
-
-/*
- * A simple DMA pool loosely based on dmapool.c. It has certain advantages
- * over the DMA pools:
- * - Pool collects recently freed pages for reuse (and hooks up to
- * the shrinker).
- * - Tracks currently in use pages
- * - Tracks whether the page is UC, WB or cached (and reverts to WB
- * when freed).
- */
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/seq_file.h> /* for seq_printf */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/highmem.h>
-#include <linux/mm_types.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/atomic.h>
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include "ttm/ttm_bo_driver.h"
-#include "ttm/ttm_page_alloc.h"
-#ifdef TTM_HAS_AGP
-#include <asm/agp.h>
-#endif
-
-#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
-#define SMALL_ALLOCATION 4
-#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define IS_UNDEFINED (0)
-#define IS_WC (1<<1)
-#define IS_UC (1<<2)
-#define IS_CACHED (1<<3)
-#define IS_DMA32 (1<<4)
-
-enum pool_type {
- POOL_IS_UNDEFINED,
- POOL_IS_WC = IS_WC,
- POOL_IS_UC = IS_UC,
- POOL_IS_CACHED = IS_CACHED,
- POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
- POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
- POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
-};
-/*
- * The pool structure. There are usually six pools:
- * - generic (not restricted to DMA32):
- * - write combined, uncached, cached.
- * - dma32 (up to 2^32 - so up 4GB):
- * - write combined, uncached, cached.
- * for each 'struct device'. The 'cached' is for pages that are actively used.
- * The other ones can be shrunk by the shrinker API if necessary.
- * @pools: The 'struct device->dma_pools' link.
- * @type: Type of the pool
- * @lock: Protects the inuse_list and free_list from concurrent access. Must be
- * used with the irqsave/irqrestore variants because the pool allocator may be
- * called from delayed work.
- * @inuse_list: Pool of pages that are in use. The order is important: it
- * matches the order in which the TTM pages were put back.
- * @free_list: Pool of pages that are free to be used. No order requirements.
- * @dev: The device that is associated with these pools.
- * @size: Size used during DMA allocation.
- * @npages_free: Count of available pages for re-use.
- * @npages_in_use: Count of pages that are in use.
- * @nfrees: Stats when pool is shrinking.
- * @nrefills: Stats when the pool is grown.
- * @gfp_flags: Flags to pass for alloc_page.
- * @name: Name of the pool.
- * @dev_name: Name derived from dev - similar to how dev_info works.
- * Used during shutdown as the dev_info during release is unavailable.
- */
-struct dma_pool {
- struct list_head pools; /* The 'struct device->dma_pools' link */
- enum pool_type type;
- spinlock_t lock;
- struct list_head inuse_list;
- struct list_head free_list;
- struct device *dev;
- unsigned size;
- unsigned npages_free;
- unsigned npages_in_use;
- unsigned long nfrees; /* Stats when shrunk. */
- unsigned long nrefills; /* Stats when grown. */
- gfp_t gfp_flags;
- char name[13]; /* "cached dma32" */
- char dev_name[64]; /* Constructed from dev */
-};
-
-/*
- * The accounting page keeping track of the allocated page along with
- * the DMA address.
- * @page_list: The link to the 'page_list' in 'struct dma_pool'.
- * @vaddr: The virtual address of the page
- * @dma: The bus address of the page. If the page is not allocated
- * via the DMA API, it will be -1.
- */
-struct dma_page {
- struct list_head page_list;
- void *vaddr;
- struct page *p;
- dma_addr_t dma;
-};
-
-/*
- * Limits for the pool. They are handled without locks because the only place
- * where they may change is the sysfs store. They won't take immediate effect
- * anyway, so forcing serialization to access them is pointless.
- */
-
-struct ttm_pool_opts {
- unsigned alloc_size;
- unsigned max_size;
- unsigned small;
-};
-
-/*
- * Contains the list of all of the 'struct device' and their corresponding
- * DMA pools. Guarded by _mutex->lock.
- * @pools: The link to 'struct ttm_pool_manager->pools'
- * @dev: The 'struct device' associated with the 'pool'
- * @pool: The 'struct dma_pool' associated with the 'dev'
- */
-struct device_pools {
- struct list_head pools;
- struct device *dev;
- struct dma_pool *pool;
-};
-
-/*
- * struct ttm_pool_manager - Holds memory pools for fast allocation
- *
- * @lock: Lock used when adding/removing from pools
- * @pools: List of 'struct device' and 'struct dma_pool' tuples.
- * @options: Limits for the pool.
- * @npools: Total amount of pools in existence.
- * @shrinker: The structure used by [un|]register_shrinker
- */
-struct ttm_pool_manager {
- struct mutex lock;
- struct list_head pools;
- struct ttm_pool_opts options;
- unsigned npools;
- struct shrinker mm_shrink;
- struct kobject kobj;
-};
-
-static struct ttm_pool_manager *_manager;
-
-static struct attribute ttm_page_pool_max = {
- .name = "pool_max_size",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_small = {
- .name = "pool_small_allocation",
- .mode = S_IRUGO | S_IWUSR
-};
-static struct attribute ttm_page_pool_alloc_size = {
- .name = "pool_allocation_size",
- .mode = S_IRUGO | S_IWUSR
-};
-
-static struct attribute *ttm_pool_attrs[] = {
- &ttm_page_pool_max,
- &ttm_page_pool_small,
- &ttm_page_pool_alloc_size,
- NULL
-};
-
-static void ttm_pool_kobj_release(struct kobject *kobj)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- kfree(m);
-}
-
-static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t size)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- int chars;
- unsigned val;
- chars = sscanf(buffer, "%u", &val);
- if (chars == 0)
- return size;
-
- /* Convert kb to number of pages */
- val = val / (PAGE_SIZE >> 10);
-
- if (attr == &ttm_page_pool_max)
- m->options.max_size = val;
- else if (attr == &ttm_page_pool_small)
- m->options.small = val;
- else if (attr == &ttm_page_pool_alloc_size) {
- if (val > NUM_PAGES_TO_ALLOC*8) {
- pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- return size;
- } else if (val > NUM_PAGES_TO_ALLOC) {
- pr_warn("Setting allocation size to larger than %lu is not recommended\n",
- NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
- }
- m->options.alloc_size = val;
- }
-
- return size;
-}
-
-static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct ttm_pool_manager *m =
- container_of(kobj, struct ttm_pool_manager, kobj);
- unsigned val = 0;
-
- if (attr == &ttm_page_pool_max)
- val = m->options.max_size;
- else if (attr == &ttm_page_pool_small)
- val = m->options.small;
- else if (attr == &ttm_page_pool_alloc_size)
- val = m->options.alloc_size;
-
- val = val * (PAGE_SIZE >> 10);
-
- return snprintf(buffer, PAGE_SIZE, "%u\n", val);
-}
-
-static const struct sysfs_ops ttm_pool_sysfs_ops = {
- .show = &ttm_pool_show,
- .store = &ttm_pool_store,
-};
-
-static struct kobj_type ttm_pool_kobj_type = {
- .release = &ttm_pool_kobj_release,
- .sysfs_ops = &ttm_pool_sysfs_ops,
- .default_attrs = ttm_pool_attrs,
-};
-
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#ifdef TTM_HAS_AGP
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif /* for !CONFIG_X86 */
-
-static int ttm_set_pages_caching(struct dma_pool *pool,
- struct page **pages, unsigned cpages)
-{
- int r = 0;
- /* Set page caching */
- if (pool->type & IS_UC) {
- r = set_pages_array_uc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to uc!\n",
- pool->dev_name, cpages);
- }
- if (pool->type & IS_WC) {
- r = set_pages_array_wc(pages, cpages);
- if (r)
- pr_err("%s: Failed to set %d pages to wc!\n",
- pool->dev_name, cpages);
- }
- return r;
-}
-
-static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
-{
- dma_addr_t dma = d_page->dma;
- dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
-
- kfree(d_page);
- d_page = NULL;
-}
-static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
-{
- struct dma_page *d_page;
-
- d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
- if (!d_page)
- return NULL;
-
- d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
- &d_page->dma,
- pool->gfp_flags);
- if (d_page->vaddr)
- d_page->p = virt_to_page(d_page->vaddr);
- else {
- kfree(d_page);
- d_page = NULL;
- }
- return d_page;
-}
-static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
-{
- enum pool_type type = IS_UNDEFINED;
-
- if (flags & TTM_PAGE_FLAG_DMA32)
- type |= IS_DMA32;
- if (cstate == tt_cached)
- type |= IS_CACHED;
- else if (cstate == tt_uncached)
- type |= IS_UC;
- else
- type |= IS_WC;
-
- return type;
-}
-
-static void ttm_pool_update_free_locked(struct dma_pool *pool,
- unsigned freed_pages)
-{
- pool->npages_free -= freed_pages;
- pool->nfrees += freed_pages;
-
-}
-
-/* set memory back to wb and free the pages. */
-static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
- struct page *pages[], unsigned npages)
-{
- struct dma_page *d_page, *tmp;
-
- /* Don't set WB on WB page pool. */
- if (npages && !(pool->type & IS_CACHED) &&
- set_pages_array_wb(pages, npages))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, npages);
-
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- }
-}
-
-static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
-{
- /* Don't set WB on WB page pool. */
- if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
-
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
-}
-
-/*
- * Free pages from pool.
- *
- * To prevent hogging the ttm_swap process we free at most NUM_PAGES_TO_ALLOC
- * pages in one go.
- *
- * @pool: to free the pages from
- * @nr_free: count of pages to free; FREE_ALL_PAGES frees the whole pool
- **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
-{
- unsigned long irq_flags;
- struct dma_page *dma_p, *tmp;
- struct page **pages_to_free;
- struct list_head d_pages;
- unsigned freed_pages = 0,
- npages_to_free = nr_free;
-
- if (NUM_PAGES_TO_ALLOC < nr_free)
- npages_to_free = NUM_PAGES_TO_ALLOC;
-#if 0
- if (nr_free > 1) {
- pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
- pool->dev_name, pool->name, current->pid,
- npages_to_free, nr_free);
- }
-#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
-
- if (!pages_to_free) {
- pr_err("%s: Failed to allocate memory for pool free operation\n",
- pool->dev_name);
- return 0;
- }
- INIT_LIST_HEAD(&d_pages);
-restart:
- spin_lock_irqsave(&pool->lock, irq_flags);
-
- /* We pick the oldest ones off the list */
- list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
- page_list) {
- if (freed_pages >= npages_to_free)
- break;
-
- /* Move the dma_page from one list to another. */
- list_move(&dma_p->page_list, &d_pages);
-
- pages_to_free[freed_pages++] = dma_p->p;
- /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
- if (freed_pages >= NUM_PAGES_TO_ALLOC) {
-
- ttm_pool_update_free_locked(pool, freed_pages);
- /**
- * Because changing page caching is costly
- * we unlock the pool to prevent stalling.
- */
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- ttm_dma_pages_put(pool, &d_pages, pages_to_free,
- freed_pages);
-
- INIT_LIST_HEAD(&d_pages);
-
- if (likely(nr_free != FREE_ALL_PAGES))
- nr_free -= freed_pages;
-
- if (NUM_PAGES_TO_ALLOC >= nr_free)
- npages_to_free = nr_free;
- else
- npages_to_free = NUM_PAGES_TO_ALLOC;
-
- freed_pages = 0;
-
- /* free all so restart the processing */
- if (nr_free)
- goto restart;
-
- /* Not allowed to fall through or break because the
- * following code runs under the spinlock while we are
- * outside it here.
- */
- goto out;
-
- }
- }
-
- /* remove range of pages from the pool */
- if (freed_pages) {
- ttm_pool_update_free_locked(pool, freed_pages);
- nr_free -= freed_pages;
- }
-
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (freed_pages)
- ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
-out:
- kfree(pages_to_free);
- return nr_free;
-}
-
-static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
-{
- struct device_pools *p;
- struct dma_pool *pool;
-
- if (!dev)
- return;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry_reverse(p, &_manager->pools, pools) {
- if (p->dev != dev)
- continue;
- pool = p->pool;
- if (pool->type != type)
- continue;
-
- list_del(&p->pools);
- kfree(p);
- _manager->npools--;
- break;
- }
- list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
- if (pool->type != type)
- continue;
- /* Takes a spinlock.. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
- WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
- /* This code path is called after _all_ references to the
- * struct device have been dropped - so nobody should be
- * touching it. In case somebody is trying to _add_ we are
- * guarded by the mutex. */
- list_del(&pool->pools);
- kfree(pool);
- break;
- }
- mutex_unlock(&_manager->lock);
-}
-
-/*
- * On free-ing of the 'struct device' this deconstructor is run.
- * Albeit the pool might have already been freed earlier.
- */
-static void ttm_dma_pool_release(struct device *dev, void *res)
-{
- struct dma_pool *pool = *(struct dma_pool **)res;
-
- if (pool)
- ttm_dma_free_pool(dev, pool->type);
-}
-
-static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
-{
- return *(struct dma_pool **)res == match_data;
-}
-
-static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
- enum pool_type type)
-{
- char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
- enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
- struct device_pools *sec_pool = NULL;
- struct dma_pool *pool = NULL, **ptr;
- unsigned i;
- int ret = -ENODEV;
- char *p;
-
- if (!dev)
- return NULL;
-
- ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- ret = -ENOMEM;
-
- pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
- dev_to_node(dev));
- if (!pool)
- goto err_mem;
-
- sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
- dev_to_node(dev));
- if (!sec_pool)
- goto err_mem;
-
- INIT_LIST_HEAD(&sec_pool->pools);
- sec_pool->dev = dev;
- sec_pool->pool = pool;
-
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->inuse_list);
- INIT_LIST_HEAD(&pool->pools);
- spin_lock_init(&pool->lock);
- pool->dev = dev;
- pool->npages_free = pool->npages_in_use = 0;
- pool->nfrees = 0;
- pool->gfp_flags = flags;
- pool->size = PAGE_SIZE;
- pool->type = type;
- pool->nrefills = 0;
- p = pool->name;
- for (i = 0; i < 5; i++) {
- if (type & t[i]) {
- p += snprintf(p, sizeof(pool->name) - (p - pool->name),
- "%s", n[i]);
- }
- }
- *p = 0;
- /* We copy the name for pr_ calls because when dma_pool_destroy is called
- * the kobj->name has already been deallocated.*/
- snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
- dev_driver_string(dev), dev_name(dev));
- mutex_lock(&_manager->lock);
- /* You can get the dma_pool from either the global: */
- list_add(&sec_pool->pools, &_manager->pools);
- _manager->npools++;
- /* or from 'struct device': */
- list_add(&pool->pools, &dev->dma_pools);
- mutex_unlock(&_manager->lock);
-
- *ptr = pool;
- devres_add(dev, ptr);
-
- return pool;
-err_mem:
- devres_free(ptr);
- kfree(sec_pool);
- kfree(pool);
- return ERR_PTR(ret);
-}
-
-static struct dma_pool *ttm_dma_find_pool(struct device *dev,
- enum pool_type type)
-{
- struct dma_pool *pool, *tmp, *found = NULL;
-
- if (type == IS_UNDEFINED)
- return found;
-
- /* NB: We iterate on the 'struct dev' which has no spinlock, but
- * it does have a kref which we have taken. The kref is taken during
- * graphic driver loading - in the drm_pci_init it calls either
- * pci_dev_get or pci_register_driver which both end up taking a kref
- * on 'struct device'.
- *
- * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
- * and call the dev_res deconstructors: ttm_dma_pool_release. The nice
- * thing is that at that point in time there are no pages associated with the
- * driver, so this function will not be called.
- */
- list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
- if (pool->type != type)
- continue;
- found = pool;
- break;
- }
- return found;
-}
-
-/*
- * Free the pages that failed to change the caching state. If there
- * are pages that have already changed their caching state, put them back
- * in the pool.
- */
-static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
- struct list_head *d_pages,
- struct page **failed_pages,
- unsigned cpages)
-{
- struct dma_page *d_page, *tmp;
- struct page *p;
- unsigned i = 0;
-
- p = failed_pages[0];
- if (!p)
- return;
- /* Find the failed page. */
- list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
- if (d_page->p != p)
- continue;
- /* .. and then progress over the full list. */
- list_del(&d_page->page_list);
- __ttm_dma_free_page(pool, d_page);
- if (++i < cpages)
- p = failed_pages[i];
- else
- break;
- }
-
-}
-
-/*
- * Allocate 'count' pages, and put 'need' of them on the 'pages' list
- * as well as on the 'dma_address' array, starting at the 'dma_offset' offset.
- * The full list of pages should also be on 'd_pages'.
- * We return zero for success, and negative numbers as errors.
- */
-static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
- struct list_head *d_pages,
- unsigned count)
-{
- struct page **caching_array;
- struct dma_page *dma_p;
- struct page *p;
- int r = 0;
- unsigned i, cpages;
- unsigned max_cpages = min(count,
- (unsigned)(PAGE_SIZE/sizeof(struct page *)));
-
- /* allocate array for page caching change */
- caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
-
- if (!caching_array) {
- pr_err("%s: Unable to allocate table for new pages\n",
- pool->dev_name);
- return -ENOMEM;
- }
-
- if (count > 1) {
- pr_debug("%s: (%s:%d) Getting %d pages\n",
- pool->dev_name, pool->name, current->pid, count);
- }
-
- for (i = 0, cpages = 0; i < count; ++i) {
- dma_p = __ttm_dma_alloc_page(pool);
- if (!dma_p) {
- pr_err("%s: Unable to get page %u\n",
- pool->dev_name, i);
-
- /* store already allocated pages in the pool after
- * setting the caching state */
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r)
- ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
- }
- r = -ENOMEM;
- goto out;
- }
- p = dma_p->p;
-#ifdef CONFIG_HIGHMEM
-		/* gfp flags of highmem pages should never include dma32,
-		 * so we should be fine in that case
-		 */
- if (!PageHighMem(p))
-#endif
- {
- caching_array[cpages++] = p;
- if (cpages == max_cpages) {
- /* Note: Cannot hold the spinlock */
- r = ttm_set_pages_caching(pool, caching_array,
- cpages);
- if (r) {
- ttm_dma_handle_caching_state_failure(
- pool, d_pages, caching_array,
- cpages);
- goto out;
- }
- cpages = 0;
- }
- }
- list_add(&dma_p->page_list, d_pages);
- }
-
- if (cpages) {
- r = ttm_set_pages_caching(pool, caching_array, cpages);
- if (r)
- ttm_dma_handle_caching_state_failure(pool, d_pages,
- caching_array, cpages);
- }
-out:
- kfree(caching_array);
- return r;
-}
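
/*
 * [Sketch, not part of the patch] The batching idea used above, reduced
 * to its core: stage pages in a bounded array and issue the expensive
 * caching transition once per batch rather than once per page.
 * set_pages_array_wc() is the x86 primitive the pool's
 * ttm_set_pages_caching() ends up using for the write-combined case;
 * the helper name below is illustrative.
 */
static int change_caching_batched(struct page **pages, unsigned count)
{
	unsigned max = min(count, (unsigned)(PAGE_SIZE / sizeof(struct page *)));
	struct page **batch = kmalloc(max * sizeof(*batch), GFP_KERNEL);
	unsigned i, n = 0;
	int r = 0;

	if (!batch)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		batch[n++] = pages[i];
		if (n == max) {
			r = set_pages_array_wc(batch, n); /* one flush per batch */
			if (r)
				goto out;
			n = 0;
		}
	}
	if (n)
		r = set_pages_array_wc(batch, n); /* flush the tail */
out:
	kfree(batch);
	return r;
}
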
-
-/*
- * @return count of pages still required to fulfill the request.
- */
-static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
- unsigned long *irq_flags)
-{
- unsigned count = _manager->options.small;
- int r = pool->npages_free;
-
- if (count > pool->npages_free) {
- struct list_head d_pages;
-
- INIT_LIST_HEAD(&d_pages);
-
- spin_unlock_irqrestore(&pool->lock, *irq_flags);
-
-		/* Returns how many more are necessary to fulfill the
-		 * request. */
- r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
-
- spin_lock_irqsave(&pool->lock, *irq_flags);
- if (!r) {
-			/* Add the fresh pages to the end. */
- list_splice(&d_pages, &pool->free_list);
- ++pool->nrefills;
- pool->npages_free += count;
- r = count;
- } else {
- struct dma_page *d_page;
- unsigned cpages = 0;
-
- pr_err("%s: Failed to fill %s pool (r:%d)!\n",
- pool->dev_name, pool->name, r);
-
- list_for_each_entry(d_page, &d_pages, page_list) {
- cpages++;
- }
- list_splice_tail(&d_pages, &pool->free_list);
- pool->npages_free += cpages;
- r = cpages;
- }
- }
- return r;
-}
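
/*
 * [Sketch, not part of the patch] The unlock/alloc/relock shape used
 * above, in isolation: GFP allocations may sleep, so they must not run
 * under the pool spinlock, and fresh pages are published to the free
 * list only after the lock is retaken. Helper name illustrative.
 */
static void refill_free_list(spinlock_t *lock, unsigned long *irq_flags,
			     struct list_head *free_list)
{
	LIST_HEAD(fresh);

	spin_unlock_irqrestore(lock, *irq_flags);
	/* ... allocate pages onto &fresh here; this may sleep ... */
	spin_lock_irqsave(lock, *irq_flags);
	list_splice(&fresh, free_list);	/* publish only under the lock */
}
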
-
-/*
- * @return count of pages still required to fulfill the request.
- * The populate list is actually a stack (not that it matters, as TTM
- * allocates one page at a time).
- */
-static int ttm_dma_pool_get_pages(struct dma_pool *pool,
- struct ttm_dma_tt *ttm_dma,
- unsigned index)
-{
- struct dma_page *d_page;
- struct ttm_tt *ttm = &ttm_dma->ttm;
- unsigned long irq_flags;
- int count, r = -ENOMEM;
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
- if (count) {
- d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
- ttm->pages[index] = d_page->p;
- ttm_dma->dma_address[index] = d_page->dma;
- list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
- r = 0;
- pool->npages_in_use += 1;
- pool->npages_free -= 1;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- return r;
-}
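
/*
 * [Sketch, not part of the patch] The pop operation performed above,
 * as a stand-alone helper (name illustrative). Must be called with
 * pool->lock held; note the free list behaves as a stack, with
 * list_add() pushing at the head and list_first_entry() popping it.
 */
static struct dma_page *pool_pop_page(struct dma_pool *pool,
				      struct list_head *dst)
{
	struct dma_page *d_page;

	if (list_empty(&pool->free_list))
		return NULL;

	d_page = list_first_entry(&pool->free_list, struct dma_page,
				  page_list);
	list_move_tail(&d_page->page_list, dst);
	pool->npages_in_use += 1;
	pool->npages_free -= 1;
	return d_page;
}
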
-
-/*
- * On success the pages list will hold the requested number of correctly
- * cached pages. On failure a negative error value (-ENOMEM, etc.) is
- * returned.
- */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- struct dma_pool *pool;
- enum pool_type type;
- unsigned i;
- gfp_t gfp_flags;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
- if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags = GFP_USER | GFP_DMA32;
- else
- gfp_flags = GFP_HIGHUSER;
- if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
- pool = ttm_dma_find_pool(dev, type);
- if (!pool) {
- pool = ttm_dma_pool_init(dev, gfp_flags, type);
- if (IS_ERR_OR_NULL(pool)) {
- return -ENOMEM;
- }
- }
-
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- for (i = 0; i < ttm->num_pages; ++i) {
- ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
- if (ret != 0) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return -ENOMEM;
- }
-
- ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- false, false);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return -ENOMEM;
- }
- }
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return ret;
- }
- }
-
- ttm->state = tt_unbound;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_populate);
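
/*
 * [Sketch, not part of the patch] How a driver consumed this helper
 * before the revert: populate fills both ttm->pages[] and
 * ttm_dma->dma_address[], and any mid-loop failure unwinds through
 * ttm_dma_unpopulate() before the error is returned. The function and
 * the 'dev' parameter below are illustrative.
 */
static int driver_tt_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	int ret = ttm_dma_populate(ttm_dma, dev);

	if (ret)
		return ret;	/* nothing to undo; populate cleaned up */

	/* ... program the GPU using ttm_dma->dma_address[] ... */
	return 0;
}
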
-
-/* Get a good estimate of how many pages are free in the pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
- struct device_pools *p;
- unsigned total = 0;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools)
- total += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return total;
-}
-
-/* Put all pages on the pages list back into the correct pool to await reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
- struct dma_pool *pool;
- struct dma_page *d_page, *next;
- enum pool_type type;
- bool is_cached = false;
- unsigned count = 0, i, npages = 0;
- unsigned long irq_flags;
-
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
- pool = ttm_dma_find_pool(dev, type);
- if (!pool)
- return;
-
- is_cached = (ttm_dma_find_pool(pool->dev,
- ttm_to_type(ttm->page_flags, tt_cached)) == pool);
-
-	/* make sure the pages array matches the list, and count the pages */
- list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
- ttm->pages[count] = d_page->p;
- count++;
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- pool->npages_in_use -= count;
- if (is_cached) {
- pool->nfrees += count;
- } else {
- pool->npages_free += count;
- list_splice(&ttm_dma->pages_list, &pool->free_list);
- npages = count;
- if (pool->npages_free > _manager->options.max_size) {
- npages = pool->npages_free - _manager->options.max_size;
-			/* free at least NUM_PAGES_TO_ALLOC pages to reduce
-			 * the number of set_memory_wb calls */
- if (npages < NUM_PAGES_TO_ALLOC)
- npages = NUM_PAGES_TO_ALLOC;
- }
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
-
- if (is_cached) {
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- d_page->p);
- ttm_dma_page_put(pool, d_page);
- }
- } else {
- for (i = 0; i < count; i++) {
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- ttm->pages[i]);
- }
- }
-
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- for (i = 0; i < ttm->num_pages; i++) {
- ttm->pages[i] = NULL;
- ttm_dma->dma_address[i] = 0;
- }
-
-	/* shrink the pool if necessary (only for !is_cached pools) */
- if (npages)
- ttm_dma_page_pool_free(pool, npages);
- ttm->state = tt_unpopulated;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
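
/*
 * [Sketch, not part of the patch] The lifetime rule the two exports
 * enforce: every successful ttm_dma_populate() must be balanced by a
 * ttm_dma_unpopulate() on the same device, so the pages return to the
 * per-device pool and the global accounting is released. Function name
 * illustrative.
 */
static void populate_unpopulate_pair(struct ttm_dma_tt *ttm_dma,
				     struct device *dev)
{
	if (ttm_dma_populate(ttm_dma, dev) == 0) {
		/* ... the buffer object is usable here ... */
		ttm_dma_unpopulate(ttm_dma, dev);
	}
}
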
-
-/**
- * Callback for mm to request that the pools reduce the number of pages held.
- */
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- static atomic_t start_pool = ATOMIC_INIT(0);
- unsigned idx = 0;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
- unsigned shrink_pages = sc->nr_to_scan;
- struct device_pools *p;
-
- if (list_empty(&_manager->pools))
- return 0;
-
- mutex_lock(&_manager->lock);
- pool_offset = pool_offset % _manager->npools;
- list_for_each_entry(p, &_manager->pools, pools) {
- unsigned nr_free;
-
- if (!p->dev)
- continue;
- if (shrink_pages == 0)
- break;
- /* Do it in round-robin fashion. */
- if (++idx < pool_offset)
- continue;
- nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
- pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
- p->pool->dev_name, p->pool->name, current->pid,
- nr_free, shrink_pages);
- }
- mutex_unlock(&_manager->lock);
- /* return estimated number of unused pages in pool */
- return ttm_dma_pool_get_num_unused_pages();
-}
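
/*
 * [Sketch, not part of the patch] The round-robin selection used above,
 * in isolation: a static atomic counter yields a different starting
 * pool on each shrinker invocation, so the same pool is not always
 * drained first. Helper name illustrative; the caller must guarantee
 * npools != 0, as the shrinker does by bailing out on an empty list.
 */
static unsigned pick_start_pool(unsigned npools)
{
	static atomic_t start_pool = ATOMIC_INIT(0);

	return (unsigned)atomic_add_return(1, &start_pool) % npools;
}
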
-
-static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
-{
- manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
- manager->mm_shrink.seeks = 1;
- register_shrinker(&manager->mm_shrink);
-}
-
-static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
-{
- unregister_shrinker(&manager->mm_shrink);
-}
-
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
-{
- int ret = -ENOMEM;
-
- WARN_ON(_manager);
-
- pr_info("Initializing DMA pool allocator\n");
-
- _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- if (!_manager)
- goto err_manager;
-
- mutex_init(&_manager->lock);
- INIT_LIST_HEAD(&_manager->pools);
-
- _manager->options.max_size = max_pages;
- _manager->options.small = SMALL_ALLOCATION;
- _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
-
- /* This takes care of auto-freeing the _manager */
- ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
- &glob->kobj, "dma_pool");
- if (unlikely(ret != 0)) {
- kobject_put(&_manager->kobj);
- goto err;
- }
- ttm_dma_pool_mm_shrink_init(_manager);
- return 0;
-err_manager:
- kfree(_manager);
- _manager = NULL;
-err:
- return ret;
-}
-
-void ttm_dma_page_alloc_fini(void)
-{
- struct device_pools *p, *t;
-
- pr_info("Finalizing DMA pool allocator\n");
- ttm_dma_pool_mm_shrink_fini(_manager);
-
- list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
- dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
- current->pid);
- WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
- ttm_dma_pool_match, p->pool));
- ttm_dma_free_pool(p->dev, p->pool->type);
- }
- kobject_put(&_manager->kobj);
- _manager = NULL;
-}
-
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
- struct device_pools *p;
- struct dma_pool *pool = NULL;
- char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
- "name", "virt", "busaddr"};
-
- if (!_manager) {
- seq_printf(m, "No pool allocator running.\n");
- return 0;
- }
- seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
- h[0], h[1], h[2], h[3], h[4], h[5]);
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools) {
- struct device *dev = p->dev;
- if (!dev)
- continue;
- pool = p->pool;
- seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
- pool->name, pool->nrefills,
- pool->nfrees, pool->npages_in_use,
- pool->npages_free,
- pool->dev_name);
- }
- mutex_unlock(&_manager->lock);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index f1fd0ee..f9cc548 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,8 +28,6 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#define pr_fmt(fmt) "[TTM] " fmt
-
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
@@ -37,6 +35,7 @@
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
@@ -44,21 +43,140 @@
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
+static int ttm_tt_swapin(struct ttm_tt *ttm);
+
/**
* Allocates storage for pointers to the pages that back the ttm.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
+ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+ ttm->dma_address = drm_calloc_large(ttm->num_pages,
+ sizeof(*ttm->dma_address));
}
-static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
- ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
- sizeof(*ttm->dma_address));
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+ drm_free_large(ttm->dma_address);
+ ttm->dma_address = NULL;
+}
+
+static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
+{
+ int write;
+ int dirty;
+ struct page *page;
+ int i;
+ struct ttm_backend *be = ttm->be;
+
+ BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
+ write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
+ dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
+
+ if (be)
+ be->func->clear(be);
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = ttm->pages[i];
+ if (page == NULL)
+ continue;
+
+ if (page == ttm->dummy_read_page) {
+ BUG_ON(write);
+ continue;
+ }
+
+ if (write && dirty && !PageReserved(page))
+ set_page_dirty_lock(page);
+
+ ttm->pages[i] = NULL;
+ ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
+ put_page(page);
+ }
+ ttm->state = tt_unpopulated;
+ ttm->first_himem_page = ttm->num_pages;
+ ttm->last_lomem_page = -1;
}
+static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+ struct page *p;
+ struct list_head h;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ int ret;
+
+ while (NULL == (p = ttm->pages[index])) {
+
+ INIT_LIST_HEAD(&h);
+
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+ &ttm->dma_address[index]);
+
+ if (ret != 0)
+ return NULL;
+
+ p = list_first_entry(&h, struct page, lru);
+
+ ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (PageHighMem(p))
+ ttm->pages[--ttm->first_himem_page] = p;
+ else
+ ttm->pages[++ttm->last_lomem_page] = p;
+ }
+ return p;
+out_err:
+ put_page(p);
+ return NULL;
+}
+
+struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
+{
+ int ret;
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0))
+ return NULL;
+ }
+ return __ttm_tt_get_page(ttm, index);
+}
+
+int ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct page *page;
+ unsigned long i;
+ struct ttm_backend *be;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ be = ttm->be;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = __ttm_tt_get_page(ttm, i);
+ if (!page)
+ return -ENOMEM;
+ }
+
+ be->func->populate(be, ttm->num_pages, ttm->pages,
+ ttm->dummy_read_page, ttm->dma_address);
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_tt_populate);
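
/*
 * [Sketch, not part of the patch] The backend contract being reinstated
 * here: ttm_tt_populate() hands the completed page array to the
 * driver's ttm_backend_func::populate() hook, via a function table of
 * roughly this shape. The my_* names are illustrative stand-ins for a
 * driver's handlers; compare the AGP backend earlier in this diff,
 * whose ttm_agp_populate()/ttm_agp_bind()/ttm_agp_unbind() fill the
 * same slots.
 */
static struct ttm_backend_func my_backend_func = {
	.populate = my_populate,	/* receives pages + dma_addrs */
	.clear = my_clear,
	.bind = my_bind,
	.unbind = my_unbind,
	.destroy = my_destroy,
};
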
+
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
enum ttm_caching_state c_old,
@@ -160,100 +278,153 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
+static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
+{
+ int i;
+ unsigned count = 0;
+ struct list_head h;
+ struct page *cur_page;
+ struct ttm_backend *be = ttm->be;
+
+ INIT_LIST_HEAD(&h);
+
+ if (be)
+ be->func->clear(be);
+ for (i = 0; i < ttm->num_pages; ++i) {
+
+ cur_page = ttm->pages[i];
+ ttm->pages[i] = NULL;
+ if (cur_page) {
+ if (page_count(cur_page) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ cur_page);
+ list_add(&cur_page->lru, &h);
+ count++;
+ }
+ }
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+ ttm->dma_address);
+ ttm->state = tt_unpopulated;
+ ttm->first_himem_page = ttm->num_pages;
+ ttm->last_lomem_page = -1;
+}
+
void ttm_tt_destroy(struct ttm_tt *ttm)
{
+ struct ttm_backend *be;
+
if (unlikely(ttm == NULL))
return;
- if (ttm->state == tt_bound) {
- ttm_tt_unbind(ttm);
+ be = ttm->be;
+ if (likely(be != NULL)) {
+ be->func->destroy(be);
+ ttm->be = NULL;
}
if (likely(ttm->pages != NULL)) {
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+ ttm_tt_free_user_pages(ttm);
+ else
+ ttm_tt_free_alloced_pages(ttm);
+
+ ttm_tt_free_page_directory(ttm);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
fput(ttm->swap_storage);
- ttm->swap_storage = NULL;
- ttm->func->destroy(ttm);
+ kfree(ttm);
}
-int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+int ttm_tt_set_user(struct ttm_tt *ttm,
+ struct task_struct *tsk,
+ unsigned long start, unsigned long num_pages)
{
- ttm->bdev = bdev;
- ttm->glob = bdev->glob;
- ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->caching_state = tt_cached;
- ttm->page_flags = page_flags;
- ttm->dummy_read_page = dummy_read_page;
- ttm->state = tt_unpopulated;
- ttm->swap_storage = NULL;
+ struct mm_struct *mm = tsk->mm;
+ int ret;
+ int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- ttm_tt_alloc_page_directory(ttm);
- if (!ttm->pages) {
- ttm_tt_destroy(ttm);
- pr_err("Failed allocating page table\n");
+ BUG_ON(num_pages != ttm->num_pages);
+ BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
+
+ /**
+ * Account user pages as lowmem pages for now.
+ */
+
+ ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
+ false, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(tsk, mm, start, num_pages,
+ write, 0, ttm->pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (ret != num_pages && write) {
+ ttm_tt_free_user_pages(ttm);
+ ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
return -ENOMEM;
}
+
+ ttm->tsk = tsk;
+ ttm->start = start;
+ ttm->state = tt_unbound;
+
return 0;
}
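
/*
 * [Sketch, not part of the patch] The pinning pattern above, reduced to
 * its core; this tree still uses the old eight-argument
 * get_user_pages(). A partial pin counts as failure, and the caller
 * must then release whatever was pinned, as ttm_tt_set_user() does via
 * ttm_tt_free_user_pages(). Helper name illustrative.
 */
static int pin_user_range(struct task_struct *tsk, unsigned long start,
			  unsigned long num_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0 /* no force */, pages, NULL);
	up_read(&mm->mmap_sem);

	return ret == num_pages ? 0 : -ENOMEM;
}
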
-EXPORT_SYMBOL(ttm_tt_init);
-void ttm_tt_fini(struct ttm_tt *ttm)
+struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+ uint32_t page_flags, struct page *dummy_read_page)
{
- drm_free_large(ttm->pages);
- ttm->pages = NULL;
-}
-EXPORT_SYMBOL(ttm_tt_fini);
+ struct ttm_bo_driver *bo_driver = bdev->driver;
+ struct ttm_tt *ttm;
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
+ if (!bo_driver)
+ return NULL;
+
+ ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
+ if (!ttm)
+ return NULL;
- ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->first_himem_page = ttm->num_pages;
+ ttm->last_lomem_page = -1;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
+
ttm->dummy_read_page = dummy_read_page;
- ttm->state = tt_unpopulated;
- ttm->swap_storage = NULL;
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- ttm_dma_tt_alloc_page_directory(ttm_dma);
- if (!ttm->pages || !ttm_dma->dma_address) {
+ ttm_tt_alloc_page_directory(ttm);
+ if (!ttm->pages) {
ttm_tt_destroy(ttm);
- pr_err("Failed allocating page table\n");
- return -ENOMEM;
+ printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
+ return NULL;
}
- return 0;
-}
-EXPORT_SYMBOL(ttm_dma_tt_init);
-
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- drm_free_large(ttm->pages);
- ttm->pages = NULL;
- drm_free_large(ttm_dma->dma_address);
- ttm_dma->dma_address = NULL;
+ ttm->be = bo_driver->create_ttm_backend_entry(bdev);
+ if (!ttm->be) {
+ ttm_tt_destroy(ttm);
+ printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
+ return NULL;
+ }
+ ttm->state = tt_unpopulated;
+ return ttm;
}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
void ttm_tt_unbind(struct ttm_tt *ttm)
{
int ret;
+ struct ttm_backend *be = ttm->be;
if (ttm->state == tt_bound) {
- ret = ttm->func->unbind(ttm);
+ ret = be->func->unbind(be);
BUG_ON(ret);
ttm->state = tt_unbound;
}
@@ -262,6 +433,7 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
+ struct ttm_backend *be;
if (!ttm)
return -EINVAL;
@@ -269,21 +441,25 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
if (ttm->state == tt_bound)
return 0;
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ be = ttm->be;
+
+ ret = ttm_tt_populate(ttm);
if (ret)
return ret;
- ret = ttm->func->bind(ttm, bo_mem);
+ ret = be->func->bind(be, bo_mem);
if (unlikely(ret != 0))
return ret;
ttm->state = tt_bound;
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER)
+ ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
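
/*
 * [Sketch, not part of the patch] ttm_tt_bind() from the caller's point
 * of view: population is implicit, so a driver needs only the single
 * call to take a tt from tt_unpopulated to tt_bound. Function name
 * illustrative.
 */
static int make_resident(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = ttm_tt_bind(ttm, bo_mem);	/* populates if needed */

	if (ret == 0)
		BUG_ON(ttm->state != tt_bound);
	return ret;
}
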
-int ttm_tt_swapin(struct ttm_tt *ttm)
+static int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
@@ -294,6 +470,16 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
int i;
int ret = -ENOMEM;
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+ ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
+ ttm->num_pages);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ return 0;
+ }
+
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
@@ -305,7 +491,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
ret = PTR_ERR(from_page);
goto out_err;
}
- to_page = ttm->pages[i];
+ to_page = __ttm_tt_get_page(ttm, i);
if (unlikely(to_page == NULL))
goto out_err;
@@ -326,6 +512,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
+ ttm_tt_free_alloced_pages(ttm);
return ret;
}
@@ -343,12 +530,24 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
+ /*
+ * For user buffers, just unpin the pages, as there should be
+ * vma references.
+ */
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
+ ttm_tt_free_user_pages(ttm);
+ ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ ttm->swap_storage = NULL;
+ return 0;
+ }
+
if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
0);
if (unlikely(IS_ERR(swap_storage))) {
- pr_err("Failed allocating swap storage\n");
+ printk(KERN_ERR "Failed allocating swap storage.\n");
return PTR_ERR(swap_storage);
}
} else
@@ -377,7 +576,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
page_cache_release(to_page);
}
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ ttm_tt_free_alloced_pages(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)