Diffstat (limited to 'drivers/gpu/drm/exynos/exynos_drm_gem.c')
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c  547
1 file changed, 404 insertions(+), 143 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 35d2cd9..7d12f6c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -35,6 +35,8 @@
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"
+#define USERPTR_MAX_SIZE SZ_64M
+
static struct exynos_drm_private_cb *private_cb;
void exynos_drm_priv_cb_register(struct exynos_drm_private_cb *cb)
@@ -132,6 +134,45 @@ static void put_vma(struct vm_area_struct *vma)
kfree(vma);
}
+/*
+ * lock_userptr_vma - lock VMAs within a user address space
+ *
+ * This function locks the VMAs covering a userptr region so that its
+ * pages cannot be swapped out. If the VMAs were left unlocked, the
+ * pages could be swapped out; after swap-in an unprivileged user might
+ * see different pages, and device DMA could reach physical memory it
+ * was never meant to access.
+ */
+static int lock_userptr_vma(struct exynos_drm_gem_buf *buf, unsigned int lock)
+{
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+
+ start = buf->userptr;
+ end = buf->userptr + buf->size - 1;
+
+ down_write(&current->mm->mmap_sem);
+
+ do {
+ vma = find_vma(current->mm, start);
+ if (!vma) {
+ up_write(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ if (lock)
+ vma->vm_flags |= VM_LOCKED;
+ else
+ vma->vm_flags &= ~VM_LOCKED;
+
+ start = vma->vm_end + 1;
+ } while (vma->vm_end < end);
+
+ up_write(&current->mm->mmap_sem);
+
+ return 0;
+}
+
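
For reference, the lock/unlock calls are meant to bracket get_user_pages(); a minimal sketch of the intended pairing (hypothetical helper, not part of this patch, using the get_user_pages() signature of this kernel generation):

static int userptr_pin_sketch(struct exynos_drm_gem_buf *buf, int write)
{
	int npages = buf->size >> PAGE_SHIFT;
	int ret;

	ret = lock_userptr_vma(buf, 1);		/* set VM_LOCKED */
	if (ret < 0)
		return ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, buf->userptr, npages,
			     write, 1 /* force */, buf->pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < npages)
		lock_userptr_vma(buf, 0);	/* undo VM_LOCKED on failure */

	return ret;
}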
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
struct vm_area_struct *vma)
{
@@ -165,25 +206,17 @@ out:
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
- struct inode *inode;
- struct address_space *mapping;
struct page *p, **pages;
int i, npages;
- /* This is the shared memory object that backs the GEM resource */
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
-
npages = obj->size >> PAGE_SHIFT;
pages = drm_malloc_ab(npages, sizeof(struct page *));
if (pages == NULL)
return ERR_PTR(-ENOMEM);
- gfpmask |= mapping_gfp_mask(mapping);
-
for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ p = alloc_page(gfpmask);
if (IS_ERR(p))
goto fail;
pages[i] = p;
@@ -192,31 +225,22 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
return pages;
fail:
- while (i--)
- page_cache_release(pages[i]);
+ while (i--)
+ __free_page(pages[i]);
drm_free_large(pages);
return ERR_PTR(PTR_ERR(p));
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages,
- bool dirty, bool accessed)
+ struct page **pages)
{
- int i, npages;
+ int npages;
npages = obj->size >> PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- if (dirty)
- set_page_dirty(pages[i]);
-
- if (accessed)
- mark_page_accessed(pages[i]);
-
- /* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
- }
+ while (--npages >= 0)
+ __free_page(pages[npages]);
drm_free_large(pages);
}
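
The failure unwind above follows the usual allocate-then-roll-back idiom: i holds the index of the failed allocation, so the loop must free pages[i-1] down to pages[0]. A self-contained sketch (generic names, assuming the drm_malloc_ab()/drm_free_large() helpers of this era):

static struct page **alloc_page_array_sketch(int npages, gfp_t gfp)
{
	struct page **pages = drm_malloc_ab(npages, sizeof(*pages));
	int i;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i])
			goto unwind;
	}

	return pages;

unwind:
	while (i--)	/* frees pages[i-1]..pages[0]; no-op when i == 0 */
		__free_page(pages[i]);
	drm_free_large(pages);
	return ERR_PTR(-ENOMEM);
}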
@@ -236,7 +260,7 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
pfn = page_to_pfn(buf->pages[page_offset++]);
} else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+ pfn = (buf->paddr >> PAGE_SHIFT) + page_offset;
return vm_insert_mixed(vma, f_vaddr, pfn);
}
@@ -255,7 +279,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_KERNEL);
+ pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
if (IS_ERR(pages)) {
DRM_ERROR("failed to get pages.\n");
return PTR_ERR(pages);
@@ -288,15 +312,13 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
sgl = sg_next(sgl);
}
- /* add some codes for UNCACHED type here. TODO */
-
buf->pages = pages;
return ret;
err1:
kfree(buf->sgt);
buf->sgt = NULL;
err:
- exynos_gem_put_pages(obj, pages, true, false);
+ exynos_gem_put_pages(obj, pages);
return ret;
}
@@ -314,7 +336,7 @@ static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
kfree(buf->sgt);
buf->sgt = NULL;
- exynos_gem_put_pages(obj, buf->pages, true, false);
+ exynos_gem_put_pages(obj, buf->pages);
buf->pages = NULL;
/* add code for the UNCACHED type here. TODO */
@@ -338,6 +360,9 @@ static void exynos_drm_put_userptr(struct drm_gem_object *obj)
npages = buf->size >> PAGE_SHIFT;
+ if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR && !buf->pfnmap)
+ lock_userptr_vma(buf, 0);
+
npages--;
while (npages >= 0) {
if (buf->write)
@@ -381,10 +406,12 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
struct drm_gem_object *obj;
struct exynos_drm_gem_buf *buf;
+ struct exynos_drm_private *private;
DRM_DEBUG_KMS("%s\n", __FILE__);
obj = &exynos_gem_obj->base;
+ private = obj->dev->dev_private;
buf = exynos_gem_obj->buffer;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
@@ -401,6 +428,18 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (!buf->pages)
return;
+ /*
+ * do not release a memory region obtained from an exporter.
+ *
+ * the exporter will release the region itself once the
+ * dmabuf's refcount drops to zero.
+ */
+ if (obj->import_attach)
+ goto out;
+
+ if (private->vmm)
+ exynos_drm_iommu_unmap_gem(obj);
+
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR)
@@ -408,6 +447,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
else
exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
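
The import_attach check encodes the usual dma-buf ownership rule: an importer never frees the backing storage, it only drops its reference, and the exporter's release callback runs when the refcount reaches zero. A hedged sketch of the importer-side teardown (generic dma-buf calls, not code from this patch):

/* importer side: release only our view of the buffer */
dma_buf_detach(dmabuf, attach);	/* undo dma_buf_attach() */
dma_buf_put(dmabuf);		/* exporter's release() runs at refcount 0 */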
@@ -441,6 +481,27 @@ struct exynos_drm_gem_obj *exynos_drm_gem_get_obj(struct drm_device *dev,
return exynos_gem_obj;
}
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return 0;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return exynos_gem_obj->buffer->size;
+}
+
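
exynos_drm_gem_get_size() returns 0 on a failed handle lookup, so the size doubles as a validity check; a sketch of a hypothetical in-kernel caller:

unsigned long size = exynos_drm_gem_get_size(dev, gem_handle, file_priv);

if (!size || offset + length > size)
	return -EINVAL;	/* bad handle or out-of-range access */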
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -474,7 +535,9 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_gem_buf *buf;
+ unsigned long packed_size = size;
int ret;
if (!size) {
@@ -499,6 +562,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
goto err_fini_buf;
}
+ exynos_gem_obj->packed_size = packed_size;
exynos_gem_obj->buffer = buf;
/* set memory type and cache attribute from user side. */
@@ -522,6 +586,31 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
}
}
+ if (private->vmm) {
+ exynos_gem_obj->vmm = private->vmm;
+
+ buf->dev_addr = exynos_drm_iommu_map_gem(dev,
+ &exynos_gem_obj->base);
+ if (!buf->dev_addr) {
+ DRM_ERROR("failed to map gem with iommu table.\n");
+ ret = -EFAULT;
+
+ if (flags & EXYNOS_BO_NONCONTIG)
+ exynos_drm_gem_put_pages(&exynos_gem_obj->base);
+ else
+ exynos_drm_free_buf(dev, flags, buf);
+
+ drm_gem_object_release(&exynos_gem_obj->base);
+
+ goto err_fini_buf;
+ }
+
+ buf->dma_addr = buf->dev_addr;
+ } else
+ buf->dma_addr = buf->paddr;
+
+ DRM_DEBUG_KMS("dma_addr = 0x%x\n", buf->dma_addr);
+
return exynos_gem_obj;
err_fini_buf:
@@ -549,63 +638,49 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return ret;
}
- return 0;
+ return ret;
}
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp,
+ unsigned int *gem_obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
}
exynos_gem_obj = to_exynos_gem_obj(obj);
+ buf = exynos_gem_obj->buffer;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
+ *gem_obj = (unsigned int)obj;
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
- return &exynos_gem_obj->buffer->dma_addr;
+ return &buf->dma_addr;
}
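
With the new signature the returned pointer refers to buf->dma_addr, and the gem_obj out-parameter hands the object pointer back for the matching put call; a hedged sketch of a caller (hypothetical device code):

unsigned int gem_obj;
dma_addr_t *dma_addr;

dma_addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, filp, &gem_obj);
if (IS_ERR(dma_addr))
	return PTR_ERR(dma_addr);

/* ... program the device with *dma_addr ... */

exynos_drm_gem_put_dma_addr(drm_dev, (void *)gem_obj);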
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev, void *gem_obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
+ if (!gem_obj)
return;
- }
- exynos_gem_obj = to_exynos_gem_obj(obj);
+ /* TODO: use a gem handle here instead of the object pointer. */
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
+ obj = gem_obj;
- /* TODO */
- return;
- }
-
- drm_gem_object_unreference_unlocked(obj);
+ exynos_gem_obj = to_exynos_gem_obj(obj);
/*
- * decrease obj->refcount one more time because we has already
- * increased it at exynos_drm_gem_get_dma_addr().
+ * unreference this gem object because it was already
+ * referenced in exynos_drm_gem_get_dma_addr().
*/
drm_gem_object_unreference_unlocked(obj);
}
@@ -681,7 +756,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
* get the page frame number of the physical memory to be
* mapped to user space.
*/
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ pfn = ((unsigned long)exynos_gem_obj->buffer->paddr) >>
PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -749,7 +824,9 @@ static int exynos_drm_get_userptr(struct drm_device *dev,
unsigned long npages = 0;
struct vm_area_struct *vma;
struct exynos_drm_gem_buf *buf = obj->buffer;
+ int ret;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, userptr);
/* the memory region mmaped with VM_PFNMAP. */
@@ -767,11 +844,11 @@ static int exynos_drm_get_userptr(struct drm_device *dev,
for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
ret = follow_pfn(vma, start, &this_pfn);
if (ret)
- return ret;
+ goto err;
if (prev_pfn == 0) {
pa = this_pfn << PAGE_SHIFT;
- buf->dma_addr = pa + offset;
+ buf->paddr = pa + offset;
} else if (this_pfn != prev_pfn + 1) {
ret = -EINVAL;
goto err;
@@ -791,14 +868,30 @@ static int exynos_drm_get_userptr(struct drm_device *dev,
goto err;
}
+ up_read(&current->mm->mmap_sem);
buf->pfnmap = true;
return npages;
err:
- buf->dma_addr = 0;
+ buf->paddr = 0;
+ up_read(&current->mm->mmap_sem);
+
return ret;
}
+ up_read(&current->mm->mmap_sem);
+
+ buf->userptr = userptr;
+
+ /*
+ * lock the vmas covering the userptr region to keep the
+ * userspace buffer from being swapped out; lock_userptr_vma()
+ * walks from buf->userptr, so it must be set first.
+ */
+ ret = lock_userptr_vma(buf, 1);
+ if (ret < 0) {
+ DRM_ERROR("failed to lock vma for userptr.\n");
+ lock_userptr_vma(buf, 0);
+ return 0;
+ }
+
buf->write = write;
npages = buf->size >> PAGE_SHIFT;
@@ -809,6 +902,7 @@ err:
if (get_npages != npages)
DRM_ERROR("failed to get user_pages.\n");
buf->pfnmap = false;
return get_npages;
@@ -817,11 +911,12 @@ err:
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct exynos_drm_private *priv = dev->dev_private;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_userptr *args = data;
struct exynos_drm_gem_buf *buf;
struct scatterlist *sgl;
- unsigned long size, userptr;
+ unsigned long size, userptr, packed_size;
unsigned int npages;
int ret, get_npages;
@@ -836,7 +931,15 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
+ packed_size = args->size;
+
size = roundup_gem_size(args->size, EXYNOS_BO_USERPTR);
+
+ if (size > priv->userptr_limit) {
+ DRM_ERROR("excessed maximum size of userptr.\n");
+ return -EINVAL;
+ }
+
userptr = args->userptr;
buf = exynos_drm_init_buf(dev, size);
@@ -849,6 +952,8 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto err_free_buffer;
}
+ exynos_gem_obj->packed_size = packed_size;
+
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
DRM_ERROR("failed to allocate buf->sgt.\n");
@@ -909,8 +1014,30 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* always use EXYNOS_BO_USERPTR as memory type for userptr. */
exynos_gem_obj->flags |= EXYNOS_BO_USERPTR;
+ if (priv->vmm) {
+ exynos_gem_obj->vmm = priv->vmm;
+
+ buf->dev_addr = exynos_drm_iommu_map_gem(dev,
+ &exynos_gem_obj->base);
+ if (!buf->dev_addr) {
+ DRM_ERROR("failed to map gem with iommu table.\n");
+ ret = -EFAULT;
+
+ exynos_drm_free_buf(dev, exynos_gem_obj->flags, buf);
+
+ drm_gem_object_release(&exynos_gem_obj->base);
+
+ goto err_release_handle;
+ }
+
+ buf->dma_addr = buf->dev_addr;
+ } else
+ buf->dma_addr = buf->paddr;
+
return 0;
+err_release_handle:
+ drm_gem_handle_delete(file_priv, args->handle);
err_release_userptr:
get_npages--;
while (get_npages >= 0)
@@ -957,6 +1084,26 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int exynos_drm_gem_user_limit_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct exynos_drm_private *priv = dev->dev_private;
+ struct drm_exynos_user_limit *limit = data;
+
+ if (limit->userptr_limit < PAGE_SIZE ||
+ limit->userptr_limit > USERPTR_MAX_SIZE) {
+ DRM_DEBUG_KMS("invalid userptr_limit size.\n");
+ return -EINVAL;
+ }
+
+ if (priv->userptr_limit == limit->userptr_limit)
+ return 0;
+
+ priv->userptr_limit = limit->userptr_limit;
+
+ return 0;
+}
+
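
From userspace the limit is set through the corresponding ioctl; a sketch assuming a hypothetical DRM_IOCTL_EXYNOS_USER_LIMIT request number (the real one lives in the UAPI header):

struct drm_exynos_user_limit limit = {
	/* must fall within [PAGE_SIZE, USERPTR_MAX_SIZE] */
	.userptr_limit = 16 << 20,	/* 16 MiB */
};

if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_USER_LIMIT, &limit) < 0)
	perror("EXYNOS_USER_LIMIT");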
int exynos_drm_gem_export_ump_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -1034,13 +1181,16 @@ static int exynos_gem_l1_cache_ops(struct drm_device *drm_dev,
}
static int exynos_gem_l2_cache_ops(struct drm_device *drm_dev,
- struct drm_exynos_gem_cache_op *op) {
- phys_addr_t phy_start, phy_end;
-
+ struct drm_file *filp,
+ struct drm_exynos_gem_cache_op *op)
+{
if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE ||
op->flags & EXYNOS_DRM_CACHE_INV_RANGE ||
op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
+ unsigned long virt_start = op->usr_addr, pfn;
+ phys_addr_t phy_start, phy_end;
struct vm_area_struct *vma;
+ int ret;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, op->usr_addr);
@@ -1052,44 +1202,90 @@ static int exynos_gem_l2_cache_ops(struct drm_device *drm_dev,
}
/*
- * for range flush to l2 cache, mmaped memory region should
- * be physically continuous because l2 cache uses PIPT.
+ * range operation on the L2 cache (which is PIPT)
*/
if (vma && (vma->vm_flags & VM_PFNMAP)) {
- unsigned long virt_start = op->usr_addr, pfn;
- int ret;
-
ret = follow_pfn(vma, virt_start, &pfn);
if (ret < 0) {
- DRM_ERROR("failed to get pfn from usr_addr.\n");
+ DRM_ERROR("failed to get pfn.\n");
return ret;
}
+ /*
+ * a memory region mapped with VM_PFNMAP is physically
+ * contiguous, so a single range operation covers it.
+ */
phy_start = pfn << PAGE_SHIFT;
phy_end = phy_start + op->size;
+
+ if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
+ outer_flush_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
+ outer_inv_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
+ outer_clean_range(phy_start, phy_end);
+
+ return 0;
} else {
- DRM_ERROR("not mmaped memory region with PFNMAP.\n");
- return -EINVAL;
+ struct exynos_drm_gem_obj *exynos_obj;
+ struct exynos_drm_gem_buf *buf;
+ struct drm_gem_object *obj;
+ struct scatterlist *sgl;
+ unsigned int npages, i = 0;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(drm_dev, filp,
+ op->gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_obj = to_exynos_gem_obj(obj);
+ buf = exynos_obj->buffer;
+ npages = buf->size >> PAGE_SHIFT;
+ sgl = buf->sgt->sgl;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ /*
+ * in this case the memory region is physically
+ * non-contiguous, so apply the range operation to
+ * each page in turn.
+ */
+ while (i < npages) {
+ phy_start = sg_dma_address(sgl);
+ phy_end = phy_start + buf->page_size;
+
+ if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
+ outer_flush_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
+ outer_inv_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
+ outer_clean_range(phy_start, phy_end);
+
+ i++;
+ sgl = sg_next(sgl);
+ }
+
+ return 0;
}
}
if (op->flags & EXYNOS_DRM_CACHE_FSH_ALL)
outer_flush_all();
- else if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
- outer_flush_range(phy_start, phy_end);
else if (op->flags & EXYNOS_DRM_CACHE_INV_ALL)
outer_inv_all();
else if (op->flags & EXYNOS_DRM_CACHE_CLN_ALL)
outer_clean_all();
- else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
- outer_inv_range(phy_start, phy_end);
- else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
- outer_clean_range(phy_start, phy_end);
else {
DRM_ERROR("invalid l2 cache operation.\n");
return -EINVAL;
}
+
return 0;
}
@@ -1105,6 +1301,33 @@ int exynos_drm_gem_cache_op_ioctl(struct drm_device *drm_dev, void *data,
if (ret)
return -EINVAL;
+ /*
+ * if op->size is SZ_1M or larger, operate on the whole
+ * cache instead: a range operation over a large buffer
+ * costs more than a full flush.
+ */
+ if (op->size >= SZ_1M) {
+ if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE) {
+ if (op->flags & EXYNOS_DRM_L1_CACHE)
+ __cpuc_flush_user_all();
+
+ if (op->flags & EXYNOS_DRM_L2_CACHE)
+ outer_flush_all();
+
+ return 0;
+ } else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE) {
+ if (op->flags & EXYNOS_DRM_L2_CACHE)
+ outer_inv_all();
+
+ return 0;
+ } else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
+ if (op->flags & EXYNOS_DRM_L2_CACHE)
+ outer_clean_all();
+
+ return 0;
+ }
+ }
+
if (op->flags & EXYNOS_DRM_L1_CACHE ||
op->flags & EXYNOS_DRM_ALL_CACHES) {
ret = exynos_gem_l1_cache_ops(drm_dev, op);
@@ -1114,7 +1337,7 @@ int exynos_drm_gem_cache_op_ioctl(struct drm_device *drm_dev, void *data,
if (op->flags & EXYNOS_DRM_L2_CACHE ||
op->flags & EXYNOS_DRM_ALL_CACHES)
- ret = exynos_gem_l2_cache_ops(drm_dev, op);
+ ret = exynos_gem_l2_cache_ops(drm_dev, file_priv, op);
err:
return ret;
}
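
A userspace caller only ever requests a range operation and lets the kernel promote it to a full flush when the range is large; a sketch using the struct fields referenced above (DRM_IOCTL_EXYNOS_GEM_CACHE_OP is an assumed request name):

struct drm_exynos_gem_cache_op op = {
	.usr_addr   = (unsigned long)ptr,	/* mmap()ed buffer address */
	.size       = length,
	.gem_handle = handle,	/* consulted for non-contiguous buffers */
	.flags      = EXYNOS_DRM_CACHE_FSH_RANGE | EXYNOS_DRM_ALL_CACHES,
};

if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_GEM_CACHE_OP, &op) < 0)
	perror("EXYNOS_GEM_CACHE_OP");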
@@ -1150,7 +1373,7 @@ int exynos_drm_gem_get_phy_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- get_phy->phy_addr = exynos_gem_obj->buffer->dma_addr;
+ get_phy->phy_addr = exynos_gem_obj->buffer->paddr;
get_phy->size = exynos_gem_obj->buffer->size;
drm_gem_object_unreference(obj);
@@ -1164,24 +1387,34 @@ int exynos_drm_gem_phy_imp_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_gem_phy_imp *args = data;
struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_private *private = drm_dev->dev_private;
struct exynos_drm_gem_buf *buffer;
+ unsigned long size, packed_size;
+ unsigned int flags = EXYNOS_BO_CONTIG;
+ unsigned int npages, i = 0;
+ struct scatterlist *sgl;
+ dma_addr_t start_addr;
int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_init(drm_dev, args->size);
+ packed_size = args->size;
+ size = roundup_gem_size(args->size, flags);
+
+ exynos_gem_obj = exynos_drm_gem_init(drm_dev, size);
if (!exynos_gem_obj)
return -ENOMEM;
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ buffer = exynos_drm_init_buf(drm_dev, size);
if (!buffer) {
DRM_DEBUG_KMS("failed to allocate buffer\n");
ret = -ENOMEM;
- goto err;
+ goto err_release_gem_obj;
}
- buffer->dma_addr = (dma_addr_t)args->phy_addr;
- buffer->size = args->size;
+ exynos_gem_obj->packed_size = packed_size;
+ buffer->paddr = (dma_addr_t)args->phy_addr;
+ buffer->size = size;
/*
* if shared is true, this buffer wouldn't be released.
@@ -1194,15 +1427,90 @@ int exynos_drm_gem_phy_imp_ioctl(struct drm_device *drm_dev, void *data,
ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
&args->gem_handle);
if (ret)
- goto err_kfree_buffer;
+ goto err_fini_buf;
DRM_DEBUG_KMS("got gem handle = 0x%x\n", args->gem_handle);
+ if (buffer->size >= SZ_1M) {
+ npages = buffer->size >> SECTION_SHIFT;
+ buffer->page_size = SECTION_SIZE;
+ } else if (buffer->size >= SZ_64K) {
+ npages = buffer->size >> 16;
+ buffer->page_size = SZ_64K;
+ } else {
+ npages = buffer->size >> PAGE_SHIFT;
+ buffer->page_size = PAGE_SIZE;
+ }
+
+ buffer->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!buffer->sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err_release_handle;
+ }
+
+ ret = sg_alloc_table(buffer->sgt, npages, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize sg table.\n");
+ goto err_free_sgt;
+ }
+
+ buffer->pages = kzalloc(sizeof(struct page *) * npages, GFP_KERNEL);
+ if (!buffer->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err_sg_free_table;
+ }
+
+ sgl = buffer->sgt->sgl;
+ start_addr = buffer->paddr;
+
+ while (i < npages) {
+ buffer->pages[i] = phys_to_page(start_addr);
+ sg_set_page(sgl, buffer->pages[i], buffer->page_size, 0);
+ sg_dma_address(sgl) = start_addr;
+ start_addr += buffer->page_size;
+ sgl = sg_next(sgl);
+ i++;
+ }
+
+ if (private->vmm) {
+ exynos_gem_obj->vmm = private->vmm;
+
+ buffer->dev_addr = exynos_drm_iommu_map_gem(drm_dev,
+ &exynos_gem_obj->base);
+ if (!buffer->dev_addr) {
+ DRM_ERROR("failed to map gem with iommu table.\n");
+ ret = -EFAULT;
+
+ exynos_drm_free_buf(drm_dev, flags, buffer);
+
+ drm_gem_object_release(&exynos_gem_obj->base);
+
+ goto err_free_pages;
+ }
+
+ buffer->dma_addr = buffer->dev_addr;
+ } else
+ buffer->dma_addr = buffer->paddr;
+
+ DRM_DEBUG_KMS("dma_addr = 0x%x\n", buffer->dma_addr);
+
return 0;
-err_kfree_buffer:
- kfree(buffer);
-err:
+err_free_pages:
+ kfree(buffer->pages);
+ buffer->pages = NULL;
+err_sg_free_table:
+ sg_free_table(buffer->sgt);
+err_free_sgt:
+ kfree(buffer->sgt);
+ buffer->sgt = NULL;
+err_release_handle:
+ drm_gem_handle_delete(file_priv, args->gem_handle);
+err_fini_buf:
+ exynos_drm_fini_buf(drm_dev, buffer);
+err_release_gem_obj:
drm_gem_object_release(&exynos_gem_obj->base);
kfree(exynos_gem_obj);
return ret;
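
The page_size bucketing above trades scatterlist length against mapping granularity; worked through for two hypothetical imports:

/*
 * a physically contiguous 3 MiB region:
 *   size >= SZ_1M  -> page_size = SECTION_SIZE (1 MiB), npages = 3
 * a 256 KiB region:
 *   size >= SZ_64K -> page_size = SZ_64K, npages = 4
 * anything smaller is described with PAGE_SIZE entries.
 */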
@@ -1267,7 +1575,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret = 0;
@@ -1288,15 +1595,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- if (!exynos_gem_obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (!obj->map_list.map) {
+ ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
@@ -1331,53 +1636,9 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
void exynos_drm_gem_close_object(struct drm_gem_object *obj,
struct drm_file *file)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct iommu_gem_map_params params;
- unsigned int type = 0;
-
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* check this gem object was mapped to iommu at here. TODO */
-
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- while (type < MAX_IOMMU_NR) {
- /*
- * unmap device address space already mapped to iommu.
- * - this codes would be performed with user gem release
- * request but in case of no request, when device driver
- * using iommu is released, also same things should be
- * performed by each driver.
- */
- if (exynos_gem_obj->iommu_info.mapped & (1 << type)) {
- dma_addr_t dma_addr;
- struct list_head *list;
-
- params.dev = exynos_gem_obj->iommu_info.devs[type];
- params.drm_dev = obj->dev;
- params.file = file;
- params.gem_obj = exynos_gem_obj;
- dma_addr = exynos_gem_obj->iommu_info.dma_addrs[type];
-
- exynos_drm_iommu_unmap_gem(&params,
- dma_addr,
- type);
-
- exynos_gem_obj->iommu_info.mapped &= ~(1 << type);
- exynos_gem_obj->iommu_info.dma_addrs[type] = 0;
-
- list = exynos_gem_obj->iommu_info.iommu_lists[type];
-
- /*
- * this gem has been unmapped from iommu so also
- * remove a iommu node from current device's own
- * iommu list.
- */
- exynos_drm_remove_iommu_list(list, exynos_gem_obj);
- }
-
- type++;
- }
+ /* TODO */
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)