author     codeworkx <daniel.hillenbrand@codeworkx.de>    2012-06-02 13:09:29 +0200
committer  codeworkx <daniel.hillenbrand@codeworkx.de>    2012-06-02 13:09:29 +0200
commit     c6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
tree       f3b4021d252c52d6463a9b3c1bb7245e399b009c /drivers/gpu/ion
parent     c6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
samsung update 1
Diffstat (limited to 'drivers/gpu/ion')
-rw-r--r--  drivers/gpu/ion/Kconfig               |   23
-rw-r--r--  drivers/gpu/ion/Makefile              |    3
-rw-r--r--  drivers/gpu/ion/exynos/Makefile       |    1
-rw-r--r--  drivers/gpu/ion/exynos/exynos_ion.c   | 1146
-rw-r--r--  drivers/gpu/ion/ion.c                 | 1374
-rw-r--r--  drivers/gpu/ion/ion_carveout_heap.c   |  162
-rw-r--r--  drivers/gpu/ion/ion_heap.c            |   72
-rw-r--r--  drivers/gpu/ion/ion_priv.h            |  184
-rw-r--r--  drivers/gpu/ion/ion_system_heap.c     |  198
-rw-r--r--  drivers/gpu/ion/ion_system_mapper.c   |  114
-rw-r--r--  drivers/gpu/ion/tegra/Makefile        |    1
-rw-r--r--  drivers/gpu/ion/tegra/tegra_ion.c     |   96
12 files changed, 3374 insertions, 0 deletions
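For context, a minimal userspace sketch of the allocation path this patch wires up in drivers/gpu/ion/ion.c: ION_IOC_ALLOC to obtain a handle, ION_IOC_SHARE to turn it into a file descriptor, then mmap() on that fd, which lands in ion_share_mmap() and the heap's map_user() op. This is not part of the commit; the ioctl numbers and structure layouts are assumed from <linux/ion.h>, which this diff does not touch, and the heap mask passed in .flags is board-specific.

/*
 * Hypothetical userspace sketch (not in this diff): exercise the ion_ioctl()
 * handlers added below.  Assumes the uapi definitions in <linux/ion.h> and a
 * board whose first heap is selected by bit 0 of the flags/heap mask.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int main(void)
{
	struct ion_allocation_data alloc = {
		.len   = 4096,
		.align = 4096,
		.flags = 1,		/* heap-id mask tested in ion_alloc() */
	};
	struct ion_fd_data share;
	void *p;
	int fd;

	fd = open("/dev/ion", O_RDWR);	/* misc device registered by ion_device_create() */
	if (fd < 0)
		return 1;

	if (ioctl(fd, ION_IOC_ALLOC, &alloc))		/* -> ion_alloc() */
		goto out;

	share.handle = alloc.handle;
	if (ioctl(fd, ION_IOC_SHARE, &share))		/* -> ion_ioctl_share(), yields an fd */
		goto free_handle;

	/* mmap() on the shared fd ends up in ion_share_mmap() -> heap->ops->map_user() */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, share.fd, 0);
	if (p != MAP_FAILED)
		munmap(p, 4096);
	close(share.fd);

free_handle:
	{
		struct ion_handle_data hd = { .handle = alloc.handle };
		ioctl(fd, ION_IOC_FREE, &hd);		/* -> ion_free() */
	}
out:
	close(fd);
	return 0;
}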
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig new file mode 100644 index 0000000..7dac510 --- /dev/null +++ b/drivers/gpu/ion/Kconfig @@ -0,0 +1,23 @@ +menuconfig ION + tristate "Ion Memory Manager" + select GENERIC_ALLOCATOR + help + Chose this option to enable the ION Memory Manager. + +config ION_TEGRA + tristate "Ion for Tegra" + depends on ARCH_TEGRA && ION + help + Choose this option if you wish to use ion on an nVidia Tegra. + +config ION_EXYNOS + tristate "Ion for Exynos" + depends on ARCH_EXYNOS && ION + select CMA + help + Choose this option if you wish to use ion on a Samsung Exynos. + +config ION_EXYNOS_CONTIGHEAP_SIZE + int "Size in Kilobytes of memory pool of EXYNOS_CONTIG_HEAP" + depends on ION_EXYNOS && CMA + default 45056 diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile new file mode 100644 index 0000000..b7a6956 --- /dev/null +++ b/drivers/gpu/ion/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o +obj-$(CONFIG_ION_TEGRA) += tegra/ +obj-$(CONFIG_ION_EXYNOS) += exynos/ diff --git a/drivers/gpu/ion/exynos/Makefile b/drivers/gpu/ion/exynos/Makefile new file mode 100644 index 0000000..9b6de53 --- /dev/null +++ b/drivers/gpu/ion/exynos/Makefile @@ -0,0 +1 @@ +obj-y += exynos_ion.o diff --git a/drivers/gpu/ion/exynos/exynos_ion.c b/drivers/gpu/ion/exynos/exynos_ion.c new file mode 100644 index 0000000..a574a32 --- /dev/null +++ b/drivers/gpu/ion/exynos/exynos_ion.c @@ -0,0 +1,1146 @@ +/* + * drivers/gpu/exynos/exynos_ion.c + * + * Copyright (C) 2011 Samsung Electronics Co., Ltd. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/ion.h> +#include <linux/platform_device.h> +#include <linux/mm.h> +#include <linux/cma.h> +#include <linux/scatterlist.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/bitops.h> +#include <linux/pagemap.h> +#include <linux/dma-mapping.h> + +#include <linux/miscdevice.h> +#include <asm/pgtable.h> +#include <asm/cacheflush.h> +#include <asm/outercache.h> + +#include "../ion_priv.h" + +struct ion_device { + struct miscdevice dev; + struct rb_root buffers; + struct mutex lock; + struct rb_root heaps; + long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, + unsigned long arg); + struct rb_root user_clients; + struct rb_root kernel_clients; + struct dentry *debug_root; +}; + +struct ion_device *ion_exynos; + +static int num_heaps; +static struct ion_heap **heaps; +static struct device *exynos_ion_dev; + +/* IMBUFS stands for "InterMediate BUFfer Storage" */ +#define IMBUFS_SHIFT 4 +#define IMBUFS_ENTRIES (1 << IMBUFS_SHIFT) +#define IMBUFS_MASK (IMBUFS_ENTRIES - 1) /* masking lower bits */ +#define MAX_LV0IMBUFS IMBUFS_ENTRIES +#define MAX_LV1IMBUFS (IMBUFS_ENTRIES + IMBUFS_ENTRIES * IMBUFS_ENTRIES) +#define MAX_IMBUFS (MAX_LV1IMBUFS + (IMBUFS_ENTRIES << (IMBUFS_SHIFT * 2))) + +#define LV1IDX(lv1base) ((lv1base) >> IMBUFS_SHIFT) +#define LV2IDX1(lv2base) ((lv2base) >> (IMBUFS_SHIFT * 2)) +#define LV2IDX2(lv2base) (((lv2base) >> (IMBUFS_SHIFT)) & IMBUFS_MASK) + +static int orders[] = {PAGE_SHIFT + 8, PAGE_SHIFT + 4, PAGE_SHIFT, 0}; + +static inline phys_addr_t *get_imbufs_and_free(int idx, + phys_addr_t *lv0imbufs, phys_addr_t **lv1pimbufs, + phys_addr_t ***lv2ppimbufs) +{ + if (idx < MAX_LV0IMBUFS) { + return lv0imbufs; + } else if (idx < MAX_LV1IMBUFS) { + phys_addr_t *imbufs; + idx -= MAX_LV0IMBUFS; + imbufs = lv1pimbufs[LV1IDX(idx)]; + if ((LV1IDX(idx) == (IMBUFS_ENTRIES - 1)) || + (lv1pimbufs[LV1IDX(idx) + 1] == NULL)) + kfree(lv1pimbufs); + return imbufs; + } else if (idx < MAX_IMBUFS) { + int baseidx; + phys_addr_t *imbufs; + baseidx = idx - MAX_LV1IMBUFS; + imbufs = lv2ppimbufs[LV2IDX1(baseidx)][LV2IDX2(baseidx)]; + if ((LV2IDX2(baseidx) == (IMBUFS_ENTRIES - 1)) || + (lv2ppimbufs[LV2IDX1(baseidx)][LV2IDX2(baseidx) + 1] + == NULL)) { + kfree(lv2ppimbufs[LV2IDX1(baseidx)]); + if ((LV2IDX1(baseidx) == (IMBUFS_ENTRIES - 1)) || + (lv2ppimbufs[LV2IDX1(baseidx) + 1] == NULL)) + kfree(lv2ppimbufs); + } + return imbufs; + + } + return NULL; +} + +static int ion_exynos_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + int *cur_order = orders; + int alloc_chunks = 0; + int ret = 0; + phys_addr_t *im_phys_bufs = NULL; + phys_addr_t **pim_phys_bufs = NULL; + phys_addr_t ***ppim_phys_bufs = NULL; + phys_addr_t *cur_bufs = NULL; + int copied = 0; + struct scatterlist *sgl; + struct sg_table *sgtable; + + while (size && *cur_order) { + struct page *page; + + if (size < (1 << *cur_order)) { + cur_order++; + continue; + } + + page = alloc_pages(GFP_HIGHUSER | __GFP_COMP | + __GFP_NOWARN | __GFP_NORETRY, + *cur_order - PAGE_SHIFT); + if (!page) { + cur_order++; + continue; + } + + if (alloc_chunks & IMBUFS_MASK) { + cur_bufs++; + } else if (alloc_chunks < MAX_LV0IMBUFS) { + if (!im_phys_bufs) + im_phys_bufs = kzalloc( + sizeof(*im_phys_bufs) * IMBUFS_ENTRIES, + GFP_KERNEL); + if (!im_phys_bufs) + break; + + cur_bufs = im_phys_bufs; + } else if (alloc_chunks < MAX_LV1IMBUFS) { + int lv1idx = 
LV1IDX(alloc_chunks - MAX_LV0IMBUFS); + + if (!pim_phys_bufs) { + pim_phys_bufs = kzalloc( + sizeof(*pim_phys_bufs) * IMBUFS_ENTRIES, + GFP_KERNEL); + if (!pim_phys_bufs) + break; + } + + if (!pim_phys_bufs[lv1idx]) { + pim_phys_bufs[lv1idx] = kzalloc( + sizeof(*cur_bufs) * IMBUFS_ENTRIES, + GFP_KERNEL); + if (!pim_phys_bufs[lv1idx]) + break; + } + + cur_bufs = pim_phys_bufs[lv1idx]; + } else if (alloc_chunks < MAX_IMBUFS) { + phys_addr_t **pcur_bufs; + int lv2base = alloc_chunks - MAX_LV1IMBUFS; + + if (!ppim_phys_bufs) { + ppim_phys_bufs = kzalloc( + sizeof(*ppim_phys_bufs) * IMBUFS_ENTRIES + , GFP_KERNEL); + if (!ppim_phys_bufs) + break; + } + + if (!ppim_phys_bufs[LV2IDX1(lv2base)]) { + ppim_phys_bufs[LV2IDX1(lv2base)] = kzalloc( + sizeof(*pcur_bufs) * IMBUFS_ENTRIES, + GFP_KERNEL); + if (!ppim_phys_bufs[LV2IDX1(lv2base)]) + break; + } + pcur_bufs = ppim_phys_bufs[LV2IDX1(lv2base)]; + + if (!pcur_bufs[LV2IDX2(lv2base)]) { + pcur_bufs[LV2IDX2(lv2base)] = kzalloc( + sizeof(*cur_bufs) * IMBUFS_ENTRIES, + GFP_KERNEL); + if (!pcur_bufs[LV2IDX2(lv2base)]) + break; + } + cur_bufs = pcur_bufs[LV2IDX2(lv2base)]; + } else { + break; + } + + *cur_bufs = page_to_phys(page) | *cur_order; + + size = size - (1 << *cur_order); + alloc_chunks++; + } + + if (size) { + ret = -ENOMEM; + goto alloc_error; + } + + sgtable = kmalloc(sizeof(*sgtable), GFP_KERNEL); + if (!sgtable) { + ret = -ENOMEM; + goto alloc_error; + } + + if (sg_alloc_table(sgtable, alloc_chunks, GFP_KERNEL)) { + ret = -ENOMEM; + kfree(sgtable); + goto alloc_error; + } + + sgl = sgtable->sgl; + while (copied < alloc_chunks) { + int i; + cur_bufs = get_imbufs_and_free(copied, im_phys_bufs, + pim_phys_bufs, ppim_phys_bufs); + BUG_ON(!cur_bufs); + for (i = 0; (i < IMBUFS_ENTRIES) && cur_bufs[i]; i++) { + phys_addr_t phys; + int order; + + phys = cur_bufs[i]; + order = phys & ~PAGE_MASK; + sg_set_page(sgl, phys_to_page(phys), 1 << order, 0); + sgl = sg_next(sgl); + copied++; + } + + kfree(cur_bufs); + } + + buffer->priv_virt = sgtable; + buffer->flags = flags; + + return 0; +alloc_error: + copied = 0; + while (copied < alloc_chunks) { + int i; + cur_bufs = get_imbufs_and_free(copied, im_phys_bufs, + pim_phys_bufs, ppim_phys_bufs); + for (i = 0; (i < IMBUFS_ENTRIES) && cur_bufs[i]; i++) { + phys_addr_t phys; + int gfp_order; + + phys = cur_bufs[i]; + gfp_order = (phys & ~PAGE_MASK) - PAGE_SHIFT; + phys = phys & PAGE_MASK; + __free_pages(phys_to_page(phys), gfp_order); + } + + kfree(cur_bufs); + copied += IMBUFS_ENTRIES; + } + + return ret; +} + +static void ion_exynos_heap_free(struct ion_buffer *buffer) +{ + struct scatterlist *sg; + int i; + struct sg_table *sgtable = buffer->priv_virt; + + for_each_sg(sgtable->sgl, sg, sgtable->orig_nents, i) + __free_pages(sg_page(sg), __ffs(sg_dma_len(sg)) - PAGE_SHIFT); + + sg_free_table(sgtable); + kfree(sgtable); +} + +static struct scatterlist *ion_exynos_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return ((struct sg_table *)buffer->priv_virt)->sgl; +} + +static void ion_exynos_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static void *ion_exynos_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct page **pages, **tmp_pages; + struct sg_table *sgt; + struct scatterlist *sgl; + int num_pages, i; + void *vaddr; + + sgt = buffer->priv_virt; + num_pages = PAGE_ALIGN(offset_in_page(sg_phys(sgt->sgl)) + buffer->size) + >> PAGE_SHIFT; + + pages = vmalloc(sizeof(*pages) * num_pages); + if (!pages) + return ERR_PTR(-ENOMEM); + + 
tmp_pages = pages; + for_each_sg(sgt->sgl, sgl, sgt->orig_nents, i) { + struct page *page = sg_page(sgl); + unsigned int n = + PAGE_ALIGN(sgl->offset + sg_dma_len(sgl)) >> PAGE_SHIFT; + + for (; n > 0; n--) + *(tmp_pages++) = page++; + } + + vaddr = vmap(pages, num_pages, VM_USERMAP | VM_MAP, PAGE_KERNEL); + + vfree(pages); + + return vaddr ? + vaddr + offset_in_page(sg_phys(sgt->sgl)) : ERR_PTR(-ENOMEM); +} + +static void ion_exynos_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct sg_table *sgt = buffer->priv_virt; + + vunmap(buffer->vaddr - offset_in_page(sg_phys(sgt->sgl))); +} + +static int ion_exynos_heap_map_user(struct ion_heap *heap, + struct ion_buffer *buffer, struct vm_area_struct *vma) +{ + struct sg_table *sgt = buffer->priv_virt; + struct scatterlist *sgl; + unsigned long pgoff; + int i; + unsigned long start; + int map_pages; + + if (buffer->kmap_cnt) + return remap_vmalloc_range(vma, buffer->vaddr, vma->vm_pgoff); + + pgoff = vma->vm_pgoff; + start = vma->vm_start; + map_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + vma->vm_flags |= VM_RESERVED; + + for_each_sg(sgt->sgl, sgl, sgt->orig_nents, i) { + unsigned long sg_pgnum = sg_dma_len(sgl) >> PAGE_SHIFT; + + if (sg_pgnum <= pgoff) { + pgoff -= sg_pgnum; + } else { + struct page *page = sg_page(sgl) + pgoff; + int i; + + sg_pgnum -= pgoff; + + for (i = 0; (map_pages > 0) && (i < sg_pgnum); i++) { + int ret; + ret = vm_insert_page(vma, start, page); + if (ret) + return ret; + start += PAGE_SIZE; + page++; + map_pages--; + } + + pgoff = 0; + + if (map_pages == 0) + break; + } + } + + return 0; +} + +static struct ion_heap_ops vmheap_ops = { + .allocate = ion_exynos_heap_allocate, + .free = ion_exynos_heap_free, + .map_dma = ion_exynos_heap_map_dma, + .unmap_dma = ion_exynos_heap_unmap_dma, + .map_kernel = ion_exynos_heap_map_kernel, + .unmap_kernel = ion_exynos_heap_unmap_kernel, + .map_user = ion_exynos_heap_map_user, +}; + +static struct ion_heap *ion_exynos_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &vmheap_ops; + heap->type = ION_HEAP_TYPE_EXYNOS; + return heap; +} + +static void ion_exynos_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + +static int ion_exynos_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + buffer->priv_phys = cma_alloc(exynos_ion_dev, NULL, len, align); + + if (0) { + /* Print debug MSG */ + struct cma_info mem_info; + struct rb_node *n=NULL; + int err; + int buffer_cnt = 0; + int size = 0; + unsigned int curr_phy = buffer->priv_phys; + unsigned int curr_size = len; + + err = cma_info(&mem_info, exynos_ion_dev, 0); + if (err) { + pr_err("%s: get cma info failed\n", __func__); + return (int)buffer->priv_phys; + } + printk("[ION_EXYNOS_CONTIG_HEAP] addr: %x ~ %x, total size: 0x%x, free size: 0x%x\n", + mem_info.lower_bound, mem_info.upper_bound, + mem_info.total_size, mem_info.free_size); + for(n = rb_first(&ion_exynos->buffers); n; n = rb_next(n)) { + struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, node); + if (buffer->heap->type == ION_HEAP_TYPE_EXYNOS_CONTIG) { + printk("[%d] 0x%x ~ 0x%x, size:0x%x\n", + buffer_cnt, (unsigned int)buffer->priv_phys, + (unsigned int)buffer->priv_phys+buffer->size, buffer->size); + size += buffer->size; + buffer_cnt++; + } + } + printk("[%d] 0x%x ~ 0x%x, size:0x%x\n", + 
buffer_cnt, (unsigned int)curr_phy, + (unsigned int)curr_phy+curr_size, curr_size); + printk("usage size: 0x%x\n", size); + } + + if (IS_ERR_VALUE(buffer->priv_phys)) { + struct cma_info mem_info; + struct rb_node *n=NULL; + int err; + int buffer_cnt = 0; + int size = 0; + + pr_err("%s: get cma alloc for ION failed\n", __func__); + err = cma_info(&mem_info, exynos_ion_dev, 0); + if (err) { + pr_err("%s: get cma info failed\n", __func__); + return (int)buffer->priv_phys; + } + printk(KERN_INFO + "[ION_EXYNOS_CONTIG_HEAP] addr: %x ~ %x, total size: 0x%x, free size: 0x%x\n", + mem_info.lower_bound, mem_info.upper_bound, + mem_info.total_size, mem_info.free_size); + for(n = rb_first(&ion_exynos->buffers); n; n = rb_next(n)) { + struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, node); + if (buffer->heap->type == ION_HEAP_TYPE_EXYNOS_CONTIG) { + printk(KERN_INFO "[%d] 0x%x ~ 0x%x, size:0x%x\n", + buffer_cnt, (unsigned int)buffer->priv_phys, + (unsigned int)buffer->priv_phys+buffer->size, buffer->size); + size += buffer->size; + buffer_cnt++; + } + } + printk(KERN_INFO "usage size: 0x%x\n", size); + return (int)buffer->priv_phys; + } + + buffer->flags = flags; + + return 0; +} + +static void ion_exynos_contig_heap_free(struct ion_buffer *buffer) +{ + if (0) + printk("free addr: 0x%x\n", (unsigned int)buffer->priv_phys); + + cma_free(buffer->priv_phys); +} + +static int ion_exynos_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = buffer->priv_phys; + *len = buffer->size; + return 0; +} + +static struct scatterlist *ion_exynos_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + + sglist = vmalloc(sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + sg_init_table(sglist, 1); + sg_set_page(sglist, phys_to_page(buffer->priv_phys), buffer->size, 0); + return sglist; +} + +static void ion_exynos_contig_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + vfree(buffer->sglist); +} + +static int ion_exynos_contig_heap_map_user(struct ion_heap *heap, + struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + unsigned long pfn = __phys_to_pfn(buffer->priv_phys); + + if (buffer->flags & ION_EXYNOS_NONCACHE_MASK) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + + return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + +} + +static void *ion_exynos_contig_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return phys_to_virt(buffer->priv_phys); +} + +static void ion_exynos_contig_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops contig_heap_ops = { + .allocate = ion_exynos_contig_heap_allocate, + .free = ion_exynos_contig_heap_free, + .phys = ion_exynos_contig_heap_phys, + .map_dma = ion_exynos_contig_heap_map_dma, + .unmap_dma = ion_exynos_contig_heap_unmap_dma, + .map_kernel = ion_exynos_contig_heap_map_kernel, + .unmap_kernel = ion_exynos_contig_heap_unmap_kernel, + .map_user = ion_exynos_contig_heap_map_user, +}; + +static struct ion_heap *ion_exynos_contig_heap_create( + struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &contig_heap_ops; + heap->type = ION_HEAP_TYPE_EXYNOS_CONTIG; + return heap; +} + +static void ion_exynos_contig_heap_destroy(struct 
ion_heap *heap) +{ + kfree(heap); +} + +struct exynos_user_heap_data { + struct sg_table sgt; + bool is_pfnmap; /* The region has VM_PFNMAP property */ +}; + +static int pfnmap_digger(struct sg_table *sgt, unsigned long addr, int nr_pages) +{ + /* If the given user address is not normal mapping, + It must be contiguous physical mapping */ + struct vm_area_struct *vma; + unsigned long *pfns; + int i, ipfn, pi, ret; + struct scatterlist *sg; + unsigned int contigs; + unsigned long pfn; + + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, addr); + up_read(¤t->mm->mmap_sem); + + if ((vma == NULL) || (vma->vm_end < (addr + (nr_pages << PAGE_SHIFT)))) + return -EINVAL; + + pfns = kmalloc(sizeof(*pfns) * nr_pages, GFP_KERNEL); + if (!pfns) + return -ENOMEM; + + ret = follow_pfn(vma, addr, &pfns[0]); /* no side effect */ + if (ret) + goto err_follow_pfn; + + if (!pfn_valid(pfns[0])) { + ret = -EINVAL; + goto err_follow_pfn; + } + + addr += PAGE_SIZE; + + /* An element of pfns consists of + * - higher 20 bits: page frame number (pfn) + * - lower 12 bits: number of contiguous pages from the pfn + * Maximum size of a contiguous chunk: 16MB (4096 pages) + * contigs = 0 indicates no adjacent page is found yet. + * Thus, contigs = x means (x + 1) pages are contiguous. + */ + for (i = 1, pi = 0, ipfn = 0, contigs = 0; i < nr_pages; i++) { + ret = follow_pfn(vma, addr, &pfn); + if (ret) + break; + + if (pfns[ipfn] == (pfn - (i - pi))) { + contigs++; + } else { + if (contigs & PAGE_MASK) { + ret = -EOVERFLOW; + break; + } + + pfns[ipfn] <<= PAGE_SHIFT; + pfns[ipfn] |= contigs; + ipfn++; + pi = i; + contigs = 0; + pfns[ipfn] = pfn; + } + + addr += PAGE_SIZE; + } + + if (i == nr_pages) { + if (contigs & PAGE_MASK) { + ret = -EOVERFLOW; + goto err_follow_pfn; + } + + pfns[ipfn] <<= PAGE_SHIFT; + pfns[ipfn] |= contigs; + + nr_pages = ipfn + 1; + } else { + goto err_follow_pfn; + } + + ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL); + if (ret) + goto err_follow_pfn; + + for_each_sg(sgt->sgl, sg, nr_pages, i) + sg_set_page(sg, phys_to_page(pfns[i]), + ((pfns[i] & ~PAGE_MASK) + 1) << PAGE_SHIFT, 0); +err_follow_pfn: + kfree(pfns); + return ret; +} + +static int ion_exynos_user_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + unsigned long start = align; + size_t last_size = 0; + struct page **pages; + int nr_pages; + int ret = 0, i; + off_t start_off; + struct exynos_user_heap_data *privdata = NULL; + struct scatterlist *sgl; + + last_size = (start + len) & ~PAGE_MASK; + if (last_size == 0) + last_size = PAGE_SIZE; + + start_off = offset_in_page(start); + + start = round_down(start, PAGE_SIZE); + + nr_pages = PFN_DOWN(PAGE_ALIGN(len + start_off)); + + pages = kzalloc(nr_pages * sizeof(*pages), GFP_KERNEL); + if (!pages) + return -ENOMEM; + + privdata = kmalloc(sizeof(*privdata), GFP_KERNEL); + if (!privdata) { + ret = -ENOMEM; + goto err_privdata; + } + + buffer->priv_virt = privdata; + buffer->flags = flags; + + ret = get_user_pages_fast(start, nr_pages, + flags & ION_EXYNOS_WRITE_MASK, pages); + + if (ret < 0) { + kfree(pages); + + ret = pfnmap_digger(&privdata->sgt, start, nr_pages); + if (ret) + goto err_pfnmap; + + privdata->is_pfnmap = true; + + return 0; + } + + if (ret != nr_pages) { + nr_pages = ret; + ret = -EFAULT; + goto err_alloc_sg; + } + + ret = sg_alloc_table(&privdata->sgt, nr_pages, GFP_KERNEL); + if (ret) + goto err_alloc_sg; + + sgl = privdata->sgt.sgl; + + sg_set_page(sgl, pages[0], + 
(nr_pages == 1) ? len : PAGE_SIZE - start_off, + start_off); + + sgl = sg_next(sgl); + + /* nr_pages == 1 if sgl == NULL here */ + for (i = 1; i < (nr_pages - 1); i++) { + sg_set_page(sgl, pages[i], PAGE_SIZE, 0); + sgl = sg_next(sgl); + } + + if (sgl) + sg_set_page(sgl, pages[i], last_size, 0); + + privdata->is_pfnmap = false; + + kfree(pages); + + return 0; +err_alloc_sg: + for (i = 0; i < nr_pages; i++) + put_page(pages[i]); +err_pfnmap: + kfree(privdata); +err_privdata: + kfree(pages); + return ret; +} + +static void ion_exynos_user_heap_free(struct ion_buffer *buffer) +{ + struct scatterlist *sg; + int i; + struct exynos_user_heap_data *privdata = buffer->priv_virt; + + if (!privdata->is_pfnmap) { + if (buffer->flags & ION_EXYNOS_WRITE_MASK) { + for_each_sg(privdata->sgt.sgl, sg, + privdata->sgt.orig_nents, i) { + set_page_dirty_lock(sg_page(sg)); + put_page(sg_page(sg)); + } + } else { + for_each_sg(privdata->sgt.sgl, sg, + privdata->sgt.orig_nents, i) + put_page(sg_page(sg)); + } + } + + sg_free_table(&privdata->sgt); + kfree(privdata); +} + +static struct ion_heap_ops user_heap_ops = { + .allocate = ion_exynos_user_heap_allocate, + .free = ion_exynos_user_heap_free, + .map_dma = ion_exynos_heap_map_dma, + .unmap_dma = ion_exynos_heap_unmap_dma, + .map_kernel = ion_exynos_heap_map_kernel, + .unmap_kernel = ion_exynos_heap_unmap_kernel, +}; + +static struct ion_heap *ion_exynos_user_heap_create( + struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &user_heap_ops; + heap->type = ION_HEAP_TYPE_EXYNOS_USER; + return heap; +} + +static void ion_exynos_user_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + +enum ION_MSYNC_TYPE { + IMSYNC_DEV_TO_READ = 0, + IMSYNC_DEV_TO_WRITE = 1, + IMSYNC_DEV_TO_RW = 2, + IMSYNC_BUF_TYPES_MASK = 3, + IMSYNC_BUF_TYPES_NUM = 4, + IMSYNC_SYNC_FOR_DEV = 0x10000, + IMSYNC_SYNC_FOR_CPU = 0x20000, +}; + +static enum dma_data_direction ion_msync_dir_table[IMSYNC_BUF_TYPES_NUM] = { + DMA_TO_DEVICE, + DMA_FROM_DEVICE, + DMA_BIDIRECTIONAL, +}; + + +static bool need_cache_invalidate(long dir) +{ + return !(ion_msync_dir_table[dir & IMSYNC_BUF_TYPES_MASK] == + DMA_TO_DEVICE); +} + +static void flush_local_cache_all(void *p) +{ + flush_cache_all(); +} + +static long ion_exynos_heap_msync(struct ion_client *client, + struct ion_handle *handle, off_t offset, size_t size, long dir) +{ + struct ion_buffer *buffer; +#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412) + enum dma_data_direction dmadir; +#endif + struct scatterlist *sg, *tsg; + int nents = 0; + int ret = 0; + + buffer = ion_share(client, handle); + if (IS_ERR(buffer)) + return PTR_ERR(buffer); + + if ((offset + size) > buffer->size) + return -EINVAL; + + sg = ion_map_dma(client, handle); + if (IS_ERR(sg)) + return PTR_ERR(sg); + + while (sg && (offset >= sg_dma_len(sg))) { + offset -= sg_dma_len(sg); + sg = sg_next(sg); + } + + size += offset; + + if (!sg) + goto err_buf_sync; + +#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412) + if (size > SZ_64K) + flush_all_cpu_caches(); + + if (size > SZ_1M) { + if (need_cache_invalidate(dir)) + outer_flush_all(); + else + outer_clean_all(); + goto done; + } +#endif + + tsg = sg; + while (tsg && (size > sg_dma_len(tsg))) { + size -= sg_dma_len(tsg); + nents++; + tsg = sg_next(tsg); + } + + if (tsg && size) + nents++; + +#if defined(CONFIG_CPU_EXYNOS4212) || defined(CONFIG_CPU_EXYNOS4412) + if (size > 
SZ_64K) { + if (need_cache_invalidate(dir)) + for (; nents > 0; nents--, sg = sg_next(sg)) + outer_flush_range(sg_phys(sg), + sg_phys(sg) + sg_dma_len(sg)); + else + for (; nents > 0; nents--, sg = sg_next(sg)) + outer_clean_range(sg_phys(sg), + sg_phys(sg) + sg_dma_len(sg)); + goto done; + } + + /* size <= SZ_64K */ + dmadir = ion_msync_dir_table[dir & IMSYNC_BUF_TYPES_MASK]; + + if ((nents == 1) && (buffer->flags & ION_HEAP_TYPE_EXYNOS_CONTIG)) { + if (dir & IMSYNC_SYNC_FOR_CPU) + dma_sync_single_for_cpu(NULL, sg_phys(sg) + offset, + size, dmadir); + else if (dir & IMSYNC_SYNC_FOR_DEV) + dma_sync_single_for_device(NULL, sg_phys(sg) + offset, + size, dmadir); + } else { + if (dir & IMSYNC_SYNC_FOR_CPU) + dma_sync_sg_for_cpu(NULL, sg, nents, dmadir); + else if (dir & IMSYNC_SYNC_FOR_DEV) + dma_sync_sg_for_device(NULL, sg, nents, dmadir); + } +#else + /* TODO: exclude offset in the first entry and remainder of the + last entry. */ + if (dir & IMSYNC_SYNC_FOR_CPU) + dma_sync_sg_for_cpu(NULL, sg, nents, + ion_msync_dir_table[dir & IMSYNC_BUF_TYPES_MASK]); + else if (dir & IMSYNC_SYNC_FOR_DEV) + dma_sync_sg_for_device(NULL, sg, nents, + ion_msync_dir_table[dir & IMSYNC_BUF_TYPES_MASK]); +#endif + +done: +err_buf_sync: + ion_unmap_dma(client, handle); + return ret; +} + +struct ion_msync_data { + enum ION_MSYNC_TYPE dir; + int fd_buffer; + size_t size; + off_t offset; +}; + +struct ion_phys_data { + int fd_buffer; + ion_phys_addr_t phys; + size_t size; +}; + +enum ION_EXYNOS_CUSTOM_CMD { + ION_EXYNOS_CUSTOM_MSYNC, + ION_EXYNOS_CUSTOM_PHYS +}; + +static long exynos_heap_ioctl(struct ion_client *client, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + + switch (cmd) { + case ION_EXYNOS_CUSTOM_MSYNC: + { + struct ion_msync_data data; + struct ion_handle *handle; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + + if ((data.offset + data.size) < data.offset) + return -EINVAL; + + handle = ion_import_fd(client, data.fd_buffer); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + ret = ion_exynos_heap_msync(client, handle, data.offset, + data.size, data.dir); + ion_free(client, handle); + break; + } + case ION_EXYNOS_CUSTOM_PHYS: + { + struct ion_phys_data data; + struct ion_handle *handle; + + if (copy_from_user(&data, (void __user *)arg, + sizeof(data))) { + return -EFAULT; + } + + handle = ion_import_fd(client, data.fd_buffer); + + if (IS_ERR(handle)) + return PTR_ERR(handle); + + ret = ion_phys(client, handle, &data.phys, &data.size); + if (ret) + return ret; + + if (copy_to_user((void __user *)arg, + &data, sizeof(data))) { + return -EFAULT; + } + ion_free(client, handle); + + break; + } + default: + return -ENOTTY; + } + + return ret; +} + +static struct ion_heap *__ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_EXYNOS: + heap = ion_exynos_heap_create(heap_data); + break; + case ION_HEAP_TYPE_EXYNOS_CONTIG: + heap = ion_exynos_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_EXYNOS_USER: + heap = ion_exynos_user_heap_create(heap_data); + break; + default: + return ion_heap_create(heap_data); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %u\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + + return heap; +} + +void __ion_heap_destroy(struct ion_heap *heap) +{ + if 
(!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_EXYNOS: + ion_exynos_heap_destroy(heap); + break; + case ION_HEAP_TYPE_EXYNOS_CONTIG: + ion_exynos_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_EXYNOS_USER: + ion_exynos_user_heap_destroy(heap); + break; + default: + ion_heap_destroy(heap); + } +} + +static int exynos_ion_probe(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + num_heaps = pdata->nr; + + heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); + if (!heaps) + return -ENOMEM; + + ion_exynos = ion_device_create(&exynos_heap_ioctl); + if (IS_ERR_OR_NULL(ion_exynos)) { + kfree(heaps); + return PTR_ERR(ion_exynos); + } + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + + heaps[i] = __ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(ion_exynos, heaps[i]); + } + platform_set_drvdata(pdev, ion_exynos); + + exynos_ion_dev = &pdev->dev; + + return 0; +err: + for (i = 0; i < num_heaps; i++) { + if (heaps[i]) + ion_heap_destroy(heaps[i]); + } + kfree(heaps); + return err; +} + +static int exynos_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + ion_device_destroy(idev); + for (i = 0; i < num_heaps; i++) + __ion_heap_destroy(heaps[i]); + kfree(heaps); + return 0; +} + +static struct platform_driver ion_driver = { + .probe = exynos_ion_probe, + .remove = exynos_ion_remove, + .driver = { .name = "ion-exynos" } +}; + +static int __init ion_init(void) +{ + return platform_driver_register(&ion_driver); +} + +subsys_initcall(ion_init); diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c new file mode 100644 index 0000000..eb81503 --- /dev/null +++ b/drivers/gpu/ion/ion.c @@ -0,0 +1,1374 @@ +/* + * drivers/gpu/ion/ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/device.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> +#include <linux/ion.h> +#include <linux/list.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/debugfs.h> + +#include "ion_priv.h" +#define DEBUG + +/** + * struct ion_device - the metadata of the ion device node + * @dev: the actual misc device + * @buffers: an rb tree of all the existing buffers + * @lock: lock protecting the buffers & heaps trees + * @heaps: list of all the heaps in the system + * @user_clients: list of all the clients created from userspace + */ +struct ion_device { + struct miscdevice dev; + struct rb_root buffers; + struct mutex lock; + struct rb_root heaps; + long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, + unsigned long arg); + struct rb_root user_clients; + struct rb_root kernel_clients; + struct dentry *debug_root; +}; + +/** + * struct ion_client - a process/hw block local address space + * @ref: for reference counting the client + * @node: node in the tree of all clients + * @dev: backpointer to ion device + * @handles: an rb tree of all the handles in this client + * @lock: lock protecting the tree of handles + * @heap_mask: mask of all supported heaps + * @name: used for debugging + * @task: used for debugging + * + * A client represents a list of buffers this client may access. + * The mutex stored here is used to protect both handles tree + * as well as the handles themselves, and should be held while modifying either. + */ +struct ion_client { + struct kref ref; + struct rb_node node; + struct ion_device *dev; + struct rb_root handles; + struct mutex lock; + unsigned int heap_mask; + const char *name; + struct task_struct *task; + pid_t pid; + struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref: reference count + * @client: back pointer to the client the buffer resides in + * @buffer: pointer to the buffer + * @node: node in the client's handle rbtree + * @kmap_cnt: count of times this client has mapped to kernel + * @dmap_cnt: count of times this client has mapped for dma + * @usermap_cnt: count of times this client has mapped for userspace + * + * Modifications to node, map_cnt or mapping should be protected by the + * lock in the client. Other fields are never changed after initialization. 
+ */ +struct ion_handle { + struct kref ref; + struct ion_client *client; + struct ion_buffer *buffer; + struct rb_node node; + unsigned int kmap_cnt; + unsigned int dmap_cnt; + unsigned int usermap_cnt; +}; + +/* this function should only be called while dev->lock is held */ +static void ion_buffer_add(struct ion_device *dev, + struct ion_buffer *buffer) +{ + struct rb_node **p = &dev->buffers.rb_node; + struct rb_node *parent = NULL; + struct ion_buffer *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_buffer, node); + + if (buffer < entry) { + p = &(*p)->rb_left; + } else if (buffer > entry) { + p = &(*p)->rb_right; + } else { + pr_err("%s: buffer already found.", __func__); + BUG(); + } + } + + rb_link_node(&buffer->node, parent, p); + rb_insert_color(&buffer->node, &dev->buffers); +} + +/* this function should only be called while dev->lock is held */ +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, + struct ion_device *dev, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + struct ion_buffer *buffer; + int ret; + + buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); + if (!buffer) + return ERR_PTR(-ENOMEM); + + buffer->heap = heap; + kref_init(&buffer->ref); + + ret = heap->ops->allocate(heap, buffer, len, align, flags); + if (ret) { + kfree(buffer); + return ERR_PTR(ret); + } + buffer->dev = dev; + buffer->size = len; + mutex_init(&buffer->lock); + ion_buffer_add(dev, buffer); + return buffer; +} + +static void ion_buffer_destroy(struct kref *kref) +{ + struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); + struct ion_device *dev = buffer->dev; + + if (WARN_ON(buffer->kmap_cnt > 0)) + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + + if (WARN_ON(buffer->dmap_cnt > 0)) + buffer->heap->ops->unmap_dma(buffer->heap, buffer); + + buffer->heap->ops->free(buffer); + mutex_lock(&dev->lock); + rb_erase(&buffer->node, &dev->buffers); + mutex_unlock(&dev->lock); + kfree(buffer); +} + +static void ion_buffer_get(struct ion_buffer *buffer) +{ + kref_get(&buffer->ref); +} + +static int ion_buffer_put(struct ion_buffer *buffer) +{ + return kref_put(&buffer->ref, ion_buffer_destroy); +} + +static struct ion_handle *ion_handle_create(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle; + + handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); + if (!handle) + return ERR_PTR(-ENOMEM); + kref_init(&handle->ref); + rb_init_node(&handle->node); + handle->client = client; + ion_buffer_get(buffer); + handle->buffer = buffer; + + return handle; +} + +static void ion_handle_destroy(struct kref *kref) +{ + struct ion_handle *handle = container_of(kref, struct ion_handle, ref); + /* XXX Can a handle be destroyed while it's map count is non-zero?: + if (handle->map_cnt) unmap + */ + ion_buffer_put(handle->buffer); + mutex_lock(&handle->client->lock); + if (!RB_EMPTY_NODE(&handle->node)) + rb_erase(&handle->node, &handle->client->handles); + mutex_unlock(&handle->client->lock); + kfree(handle); +} + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) +{ + return handle->buffer; +} + +static void ion_handle_get(struct ion_handle *handle) +{ + kref_get(&handle->ref); +} + +static int ion_handle_put(struct ion_handle *handle) +{ + return kref_put(&handle->ref, ion_handle_destroy); +} + +static struct ion_handle *ion_handle_lookup(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct rb_node *n; + + for (n = rb_first(&client->handles); n; n = 
rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + if (handle->buffer == buffer) + return handle; + } + return NULL; +} + +static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) +{ + struct rb_node *n = client->handles.rb_node; + + while (n) { + struct ion_handle *handle_node = rb_entry(n, struct ion_handle, + node); + if (handle < handle_node) + n = n->rb_left; + else if (handle > handle_node) + n = n->rb_right; + else + return true; + } + return false; +} + +static void ion_handle_add(struct ion_client *client, struct ion_handle *handle) +{ + struct rb_node **p = &client->handles.rb_node; + struct rb_node *parent = NULL; + struct ion_handle *entry; + + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_handle, node); + + if (handle < entry) + p = &(*p)->rb_left; + else if (handle > entry) + p = &(*p)->rb_right; + else + WARN(1, "%s: buffer already found.", __func__); + } + + rb_link_node(&handle->node, parent, p); + rb_insert_color(&handle->node, &client->handles); +} + +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int flags) +{ + struct rb_node *n; + struct ion_handle *handle; + struct ion_device *dev = client->dev; + struct ion_buffer *buffer = NULL; + + /* + * traverse the list of heaps available in this system in priority + * order. If the heap type is supported by the client, and matches the + * request of the caller allocate from it. Repeat until allocate has + * succeeded or all heaps have been tried + */ + if (WARN_ON(!len)) + return ERR_PTR(-EINVAL); + + len = PAGE_ALIGN(len); + + mutex_lock(&dev->lock); + for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { + struct ion_heap *heap = rb_entry(n, struct ion_heap, node); + /* if the client doesn't support this heap type */ + if (!((1 << heap->type) & client->heap_mask)) + continue; + /* if the caller didn't specify this heap type */ + if (!((1 << heap->id) & flags)) + continue; + buffer = ion_buffer_create(heap, dev, len, align, flags); + if (!IS_ERR_OR_NULL(buffer)) + break; + } + mutex_unlock(&dev->lock); + + if (buffer == NULL) + return ERR_PTR(-ENODEV); + + if (IS_ERR(buffer)) + return ERR_PTR(PTR_ERR(buffer)); + + handle = ion_handle_create(client, buffer); + + /* + * ion_buffer_create will create a buffer with a ref_cnt of 1, + * and ion_handle_create will take a second reference, drop one here + */ + ion_buffer_put(buffer); + + if (!IS_ERR_OR_NULL(handle)) { + mutex_lock(&client->lock); + ion_handle_add(client, handle); + mutex_unlock(&client->lock); + } + + return handle; +} + +#ifdef CONFIG_ION_EXYNOS +struct ion_handle *ion_exynos_get_user_pages(struct ion_client *client, + unsigned long uvaddr, size_t len, unsigned int flags) +{ + struct rb_node *n; + struct ion_handle *handle; + struct ion_device *dev = client->dev; + struct ion_buffer *buffer = NULL; + + if (WARN_ON(!len)) + return ERR_PTR(-EINVAL); + + if (WARN_ON((flags & ~ION_EXYNOS_WRITE_MASK) + != ION_HEAP_EXYNOS_USER_MASK)) + return ERR_PTR(-ENOSYS); + + mutex_lock(&dev->lock); + for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { + struct ion_heap *heap = rb_entry(n, struct ion_heap, node); + /* if the client doesn't support this heap type */ + if (!((1 << heap->type) & client->heap_mask)) + continue; + /* if the caller didn't specify this heap type */ + if (!((1 << heap->id) & flags)) + continue; + buffer = ion_buffer_create(heap, dev, len, uvaddr, flags); + if (!IS_ERR_OR_NULL(buffer)) + break; + } + 
mutex_unlock(&dev->lock); + + if (buffer == NULL) + return ERR_PTR(-ENODEV); + + if (IS_ERR(buffer)) + return ERR_PTR(PTR_ERR(buffer)); + + handle = ion_handle_create(client, buffer); + + /* + * ion_buffer_create will create a buffer with a ref_cnt of 1, + * and ion_handle_create will take a second reference, drop one here + */ + ion_buffer_put(buffer); + + if (!IS_ERR_OR_NULL(handle)) { + mutex_lock(&client->lock); + ion_handle_add(client, handle); + mutex_unlock(&client->lock); + } + + return handle; +} +#endif /* CONFIG_ION_EXYNOS */ + +void ion_free(struct ion_client *client, struct ion_handle *handle) +{ + bool valid_handle; + + BUG_ON(client != handle->client); + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + mutex_unlock(&client->lock); + + if (!valid_handle) { + WARN("%s: invalid handle passed to free.\n", __func__); + return; + } + ion_handle_put(handle); +} + +static void ion_client_get(struct ion_client *client); +static int ion_client_put(struct ion_client *client); + +static bool _ion_map(int *buffer_cnt, int *handle_cnt) +{ + bool map; + + BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0); + + if (*buffer_cnt) + map = false; + else + map = true; + if (*handle_cnt == 0) + (*buffer_cnt)++; + (*handle_cnt)++; + return map; +} + +static bool _ion_unmap(int *buffer_cnt, int *handle_cnt) +{ + BUG_ON(*handle_cnt == 0); + (*handle_cnt)--; + if (*handle_cnt != 0) + return false; + BUG_ON(*buffer_cnt == 0); + (*buffer_cnt)--; + if (*buffer_cnt == 0) + return true; + return false; +} + +int ion_phys(struct ion_client *client, struct ion_handle *handle, + ion_phys_addr_t *addr, size_t *len) +{ + struct ion_buffer *buffer; + int ret; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + mutex_unlock(&client->lock); + return -EINVAL; + } + + buffer = handle->buffer; + + if (!buffer->heap->ops->phys) { + pr_err("%s: ion_phys is not implemented by this heap.\n", + __func__); + mutex_unlock(&client->lock); + return -ENODEV; + } + mutex_unlock(&client->lock); + ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); + return ret; +} + +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + void *vaddr; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_kernel.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + + buffer = handle->buffer; + mutex_lock(&buffer->lock); + + if (!handle->buffer->heap->ops->map_kernel) { + pr_err("%s: map_kernel is not implemented by this heap.\n", + __func__); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return ERR_PTR(-ENODEV); + } + + if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) { + vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); + if (IS_ERR_OR_NULL(vaddr)) + _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt); + buffer->vaddr = vaddr; + } else { + vaddr = buffer->vaddr; + } + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return vaddr; +} + +struct scatterlist *ion_map_dma(struct ion_client *client, + struct ion_handle *handle) +{ + struct ion_buffer *buffer; + struct scatterlist *sglist; + + mutex_lock(&client->lock); + if (!ion_handle_validate(client, handle)) { + pr_err("%s: invalid handle passed to map_dma.\n", + __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); + } + buffer = handle->buffer; + mutex_lock(&buffer->lock); + + if (!handle->buffer->heap->ops->map_dma) { + 
pr_err("%s: map_kernel is not implemented by this heap.\n", + __func__); + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return ERR_PTR(-ENODEV); + } + if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) { + sglist = buffer->heap->ops->map_dma(buffer->heap, buffer); + if (IS_ERR_OR_NULL(sglist)) + _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt); + buffer->sglist = sglist; + } else { + sglist = buffer->sglist; + } + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); + return sglist; +} + +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + mutex_lock(&buffer->lock); + if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) { + buffer->heap->ops->unmap_kernel(buffer->heap, buffer); + buffer->vaddr = NULL; + } + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); +} + +void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle) +{ + struct ion_buffer *buffer; + + mutex_lock(&client->lock); + buffer = handle->buffer; + mutex_lock(&buffer->lock); + if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) { + buffer->heap->ops->unmap_dma(buffer->heap, buffer); + buffer->sglist = NULL; + } + mutex_unlock(&buffer->lock); + mutex_unlock(&client->lock); +} + + +struct ion_buffer *ion_share(struct ion_client *client, + struct ion_handle *handle) +{ + bool valid_handle; + + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + mutex_unlock(&client->lock); + if (!valid_handle) { + WARN("%s: invalid handle passed to share.\n", __func__); + return ERR_PTR(-EINVAL); + } + + /* do not take an extra reference here, the burden is on the caller + * to make sure the buffer doesn't go away while it's passing it + * to another client -- ion_free should not be called on this handle + * until the buffer has been imported into the other client + */ + return handle->buffer; +} + +struct ion_handle *ion_import(struct ion_client *client, + struct ion_buffer *buffer) +{ + struct ion_handle *handle = NULL; + + mutex_lock(&client->lock); + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR_OR_NULL(handle)) { + ion_handle_get(handle); + goto end; + } + handle = ion_handle_create(client, buffer); + if (IS_ERR_OR_NULL(handle)) + goto end; + ion_handle_add(client, handle); +end: + mutex_unlock(&client->lock); + return handle; +} + +static const struct file_operations ion_share_fops; + +int ion_share_fd(struct ion_client *client, struct ion_handle *handle) +{ + int fd = get_unused_fd(); + struct file *filp; + struct ion_buffer *buffer; + + if (fd < 0) + return fd; + + buffer = ion_share(client, handle); + if (IS_ERR(buffer)) + goto err; + + filp = anon_inode_getfile("ion_share_fd", &ion_share_fops, buffer, + O_RDWR); + if (IS_ERR_OR_NULL(filp)) + goto err; + + ion_buffer_get(buffer); + fd_install(fd, filp); + return fd; +err: + put_unused_fd(fd); + return -ENFILE; +} + +struct ion_handle *ion_import_fd(struct ion_client *client, int fd) +{ + struct file *file = fget(fd); + struct ion_handle *handle; + + if (!file) { + pr_err("%s: imported fd not found in file table.\n", __func__); + return ERR_PTR(-EINVAL); + } + if (file->f_op != &ion_share_fops) { + pr_err("%s: imported file is not a shared ion file.\n", + __func__); + handle = ERR_PTR(-EINVAL); + goto end; + } + handle = ion_import(client, file->private_data); +end: + fput(file); + return handle; +} + +struct 
ion_handle *ion_import_uva(struct ion_client *client, unsigned long uva, + off_t *offset) +{ + struct vm_area_struct *vma; + struct ion_handle *handle; + + vma = find_vma(current->mm, uva); + if (!vma) { + pr_err("%s: invalid importing address 0x%lx.\n", __func__, uva); + return ERR_PTR(-EINVAL); + } + + if (!vma->vm_file) { + pr_debug("%s: imported address is not file-mapped\n", __func__); + return ERR_PTR(-ENXIO); + } + + if (vma->vm_file->f_op != &ion_share_fops) { + pr_debug("%s: imported file is not a shared ion file.\n", + __func__); + return ERR_PTR(-ENXIO); + } + + handle = ion_import(client, vma->vm_file->private_data); + if (IS_ERR(handle)) + return handle; + + if (offset) { + ion_phys_addr_t phys; + + *offset = vma->vm_pgoff << PAGE_SHIFT; + + if (is_linear_pfn_mapping(vma)) { + /* if vma is VM_PFN_AT_MMAPed, vma->vm_pgoff indicates + * mapped physical address */ + size_t len; + if (ion_phys(client, handle, &phys, &len)) + return ERR_PTR(-EINVAL); + + *offset -= phys; + } + *offset += uva - vma->vm_start; + } + + return handle; +} + +static int ion_debug_client_show(struct seq_file *s, void *unused) +{ + struct ion_client *client = s->private; + struct rb_node *n; + size_t sizes[ION_NUM_HEAPS] = {0}; + const char *names[ION_NUM_HEAPS] = {0}; + int i; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + enum ion_heap_type type = handle->buffer->heap->type; + + if (!names[type]) + names[type] = handle->buffer->heap->name; + sizes[type] += handle->buffer->size; + } + mutex_unlock(&client->lock); + + seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); + for (i = 0; i < ION_NUM_HEAPS; i++) { + if (!names[i]) + continue; + seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i], + atomic_read(&client->ref.refcount)); + } + return 0; +} + +static int ion_debug_client_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_client_show, inode->i_private); +} + +static const struct file_operations debug_client_fops = { + .open = ion_debug_client_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct ion_client *ion_client_lookup(struct ion_device *dev, + struct task_struct *task) +{ + struct rb_node *n = dev->user_clients.rb_node; + struct ion_client *client; + + mutex_lock(&dev->lock); + while (n) { + client = rb_entry(n, struct ion_client, node); + if (task == client->task) { + ion_client_get(client); + mutex_unlock(&dev->lock); + return client; + } else if (task < client->task) { + n = n->rb_left; + } else if (task > client->task) { + n = n->rb_right; + } + } + mutex_unlock(&dev->lock); + return NULL; +} + +struct ion_client *ion_client_create(struct ion_device *dev, + unsigned int heap_mask, + const char *name) +{ + struct ion_client *client; + struct task_struct *task; + struct rb_node **p; + struct rb_node *parent = NULL; + struct ion_client *entry; + char debug_name[64]; + pid_t pid; + + get_task_struct(current->group_leader); + task_lock(current->group_leader); + pid = task_pid_nr(current->group_leader); + /* don't bother to store task struct for kernel threads, + they can't be killed anyway */ + if (current->group_leader->flags & PF_KTHREAD) { + put_task_struct(current->group_leader); + task = NULL; + } else { + task = current->group_leader; + } + task_unlock(current->group_leader); + + /* if this isn't a kernel thread, see if a client already + exists */ + if (task) { + client = 
ion_client_lookup(dev, task); + if (!IS_ERR_OR_NULL(client)) { + put_task_struct(current->group_leader); + return client; + } + } + + client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); + if (!client) { + if (task) + put_task_struct(current->group_leader); + return ERR_PTR(-ENOMEM); + } + + client->dev = dev; + client->handles = RB_ROOT; + mutex_init(&client->lock); + client->name = name; + client->heap_mask = heap_mask; + client->task = task; + client->pid = pid; + kref_init(&client->ref); + + mutex_lock(&dev->lock); + if (task) { + p = &dev->user_clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (task < entry->task) + p = &(*p)->rb_left; + else if (task > entry->task) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->user_clients); + } else { + p = &dev->kernel_clients.rb_node; + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_client, node); + + if (client < entry) + p = &(*p)->rb_left; + else if (client > entry) + p = &(*p)->rb_right; + } + rb_link_node(&client->node, parent, p); + rb_insert_color(&client->node, &dev->kernel_clients); + } + + snprintf(debug_name, 64, "%u", client->pid); + client->debug_root = debugfs_create_file(debug_name, 0664, + dev->debug_root, client, + &debug_client_fops); + mutex_unlock(&dev->lock); + + return client; +} + +static void _ion_client_destroy(struct kref *kref) +{ + struct ion_client *client = container_of(kref, struct ion_client, ref); + struct ion_device *dev = client->dev; + struct rb_node *n; + + pr_debug("%s: %d\n", __func__, __LINE__); + while ((n = rb_first(&client->handles))) { + struct ion_handle *handle = rb_entry(n, struct ion_handle, + node); + ion_handle_destroy(&handle->ref); + } + mutex_lock(&dev->lock); + if (client->task) { + rb_erase(&client->node, &dev->user_clients); + put_task_struct(client->task); + } else { + rb_erase(&client->node, &dev->kernel_clients); + } + debugfs_remove_recursive(client->debug_root); + mutex_unlock(&dev->lock); + + kfree(client); +} + +static void ion_client_get(struct ion_client *client) +{ + kref_get(&client->ref); +} + +static int ion_client_put(struct ion_client *client) +{ + return kref_put(&client->ref, _ion_client_destroy); +} + +void ion_client_destroy(struct ion_client *client) +{ + ion_client_put(client); +} + +struct ion_client *ion_get_user_client(unsigned int fd_client) +{ + struct file *file; + struct ion_client *client = NULL; + + file = fget(fd_client); + if (!file) + return NULL; + + if (file->private_data) { + client = file->private_data; + client = ion_client_lookup(client->dev, current->group_leader); + + if (client && (file->private_data != client)) { + ion_client_put(client); + client = NULL; + } + + } + + fput(file); + + if (client == NULL) + pr_err("%s: not a valid file descriptor of user client for the" + " current process\n", __func__); + + return client; +} + +void ion_put_user_client(struct ion_client *user_client) +{ + ion_client_put(user_client); +} + +static int ion_share_release(struct inode *inode, struct file* file) +{ + struct ion_buffer *buffer = file->private_data; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* drop the reference to the buffer -- this prevents the + buffer from going away because the client holding it exited + while it was being passed */ + ion_buffer_put(buffer); + return 0; +} + +static void ion_vma_open(struct vm_area_struct *vma) +{ + + struct ion_buffer *buffer = vma->vm_file->private_data; + struct 
ion_handle *handle = vma->vm_private_data; + struct ion_client *client; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* check that the client still exists and take a reference so + it can't go away until this vma is closed */ + client = ion_client_lookup(buffer->dev, current->group_leader); + if (IS_ERR_OR_NULL(client)) { + vma->vm_private_data = NULL; + return; + } + + if (!ion_handle_validate(client, handle)) { + ion_client_put(client); + vma->vm_private_data = NULL; + return; + } + + ion_handle_get(handle); + + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); +} + +static void ion_vma_close(struct vm_area_struct *vma) +{ + struct ion_handle *handle = vma->vm_private_data; + struct ion_buffer *buffer = vma->vm_file->private_data; + struct ion_client *client; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* this indicates the client is gone, nothing to do here */ + if (!handle) + return; + client = handle->client; + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); + ion_handle_put(handle); + ion_client_put(client); + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); +} + +static struct vm_operations_struct ion_vm_ops = { + .open = ion_vma_open, + .close = ion_vma_close, +}; + +static int ion_share_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ion_buffer *buffer = file->private_data; + unsigned long size = vma->vm_end - vma->vm_start; + struct ion_client *client; + struct ion_handle *handle; + int ret; + + pr_debug("%s: %d\n", __func__, __LINE__); + /* make sure the client still exists, it's possible for the client to + have gone away but the map/share fd still to be around, take + a reference to it so it can't go away while this mapping exists */ + client = ion_client_lookup(buffer->dev, current->group_leader); + if (IS_ERR_OR_NULL(client)) { + pr_err("%s: trying to mmap an ion handle in a process with no " + "ion client\n", __func__); + return -EINVAL; + } + + if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) > + buffer->size)) { + pr_err("%s: trying to map larger area than handle has available" + "\n", __func__); + ret = -EINVAL; + goto err; + } + + /* find the handle and take a reference to it */ + handle = ion_import(client, buffer); + if (IS_ERR_OR_NULL(handle)) { + ret = -EINVAL; + goto err; + } + + if (!handle->buffer->heap->ops->map_user) { + pr_err("%s: this heap does not define a method for mapping " + "to userspace\n", __func__); + ret = -EINVAL; + goto err1; + } + + mutex_lock(&buffer->lock); + /* now map it to userspace */ + ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); + mutex_unlock(&buffer->lock); + if (ret) { + pr_err("%s: failure mapping buffer to userspace\n", + __func__); + goto err1; + } + + vma->vm_ops = &ion_vm_ops; + /* move the handle into the vm_private_data so we can access it from + vma_open/close */ + vma->vm_private_data = handle; + pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n", + __func__, __LINE__, + atomic_read(&client->ref.refcount), + atomic_read(&handle->ref.refcount), + atomic_read(&buffer->ref.refcount)); + return 0; + +err1: + /* drop the 
reference to the handle */ + ion_handle_put(handle); +err: + /* drop the reference to the client */ + ion_client_put(client); + return ret; +} + +static const struct file_operations ion_share_fops = { + .owner = THIS_MODULE, + .release = ion_share_release, + .mmap = ion_share_mmap, +}; + +static int ion_ioctl_share(struct file *parent, struct ion_client *client, + struct ion_handle *handle) +{ + int fd = get_unused_fd(); + struct file *file; + + if (fd < 0) + return fd; + + file = anon_inode_getfile("ion_share_fd", &ion_share_fops, + handle->buffer, O_RDWR); + if (IS_ERR_OR_NULL(file)) + goto err; + ion_buffer_get(handle->buffer); + fd_install(fd, file); + + return fd; + +err: + put_unused_fd(fd); + return -ENFILE; +} + +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct ion_client *client = filp->private_data; + + switch (cmd) { + case ION_IOC_ALLOC: + { + struct ion_allocation_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + data.handle = ion_alloc(client, data.len, data.align, + data.flags); + + if (IS_ERR(data.handle)) + return PTR_ERR(data.handle); + + if (copy_to_user((void __user *)arg, &data, sizeof(data))) { + ion_free(client, data.handle); + return -EFAULT; + } + break; + } + case ION_IOC_FREE: + { + struct ion_handle_data data; + bool valid; + + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_handle_data))) + return -EFAULT; + mutex_lock(&client->lock); + valid = ion_handle_validate(client, data.handle); + mutex_unlock(&client->lock); + if (!valid) + return -EINVAL; + ion_free(client, data.handle); + break; + } + case ION_IOC_MAP: + case ION_IOC_SHARE: + { + struct ion_fd_data data; + + if (copy_from_user(&data, (void __user *)arg, sizeof(data))) + return -EFAULT; + mutex_lock(&client->lock); + if (!ion_handle_validate(client, data.handle)) { + pr_err("%s: invalid handle passed to share ioctl.\n", + __func__); + mutex_unlock(&client->lock); + return -EINVAL; + } + data.fd = ion_ioctl_share(filp, client, data.handle); + mutex_unlock(&client->lock); + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + break; + } + case ION_IOC_IMPORT: + { + struct ion_fd_data data; + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_fd_data))) + return -EFAULT; + + data.handle = ion_import_fd(client, data.fd); + if (IS_ERR(data.handle)) + data.handle = NULL; + if (copy_to_user((void __user *)arg, &data, + sizeof(struct ion_fd_data))) + return -EFAULT; + break; + } + case ION_IOC_CUSTOM: + { + struct ion_device *dev = client->dev; + struct ion_custom_data data; + + if (!dev->custom_ioctl) + return -ENOTTY; + if (copy_from_user(&data, (void __user *)arg, + sizeof(struct ion_custom_data))) + return -EFAULT; + return dev->custom_ioctl(client, data.cmd, data.arg); + } + default: + return -ENOTTY; + } + return 0; +} + +static int ion_release(struct inode *inode, struct file *file) +{ + struct ion_client *client = file->private_data; + + pr_debug("%s: %d\n", __func__, __LINE__); + ion_client_put(client); + return 0; +} + +static int ion_open(struct inode *inode, struct file *file) +{ + struct miscdevice *miscdev = file->private_data; + struct ion_device *dev = container_of(miscdev, struct ion_device, dev); + struct ion_client *client; + + pr_debug("%s: %d\n", __func__, __LINE__); + client = ion_client_create(dev, -1, "user"); + if (IS_ERR_OR_NULL(client)) + return PTR_ERR(client); + file->private_data = client; + + return 0; +} + +static const struct 
file_operations ion_fops = { + .owner = THIS_MODULE, + .open = ion_open, + .release = ion_release, + .unlocked_ioctl = ion_ioctl, +}; + +static size_t ion_debug_heap_total(struct ion_client *client, + enum ion_heap_type type) +{ + size_t size = 0; + struct rb_node *n; + + mutex_lock(&client->lock); + for (n = rb_first(&client->handles); n; n = rb_next(n)) { + struct ion_handle *handle = rb_entry(n, + struct ion_handle, + node); + if (handle->buffer->heap->type == type) + size += handle->buffer->size; + } + mutex_unlock(&client->lock); + return size; +} + +static int ion_debug_heap_show(struct seq_file *s, void *unused) +{ + struct ion_heap *heap = s->private; + struct ion_device *dev = heap->dev; + struct rb_node *n; + + seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); + for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + char task_comm[TASK_COMM_LEN]; + size_t size = ion_debug_heap_total(client, heap->type); + if (!size) + continue; + + get_task_comm(task_comm, client->task); + seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid, + size); + } + + for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) { + struct ion_client *client = rb_entry(n, struct ion_client, + node); + size_t size = ion_debug_heap_total(client, heap->type); + if (!size) + continue; + seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid, + size); + } + return 0; +} + +static int ion_debug_heap_open(struct inode *inode, struct file *file) +{ + return single_open(file, ion_debug_heap_show, inode->i_private); +} + +static const struct file_operations debug_heap_fops = { + .open = ion_debug_heap_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) +{ + struct rb_node **p = &dev->heaps.rb_node; + struct rb_node *parent = NULL; + struct ion_heap *entry; + + heap->dev = dev; + mutex_lock(&dev->lock); + while (*p) { + parent = *p; + entry = rb_entry(parent, struct ion_heap, node); + + if (heap->id < entry->id) { + p = &(*p)->rb_left; + } else if (heap->id > entry->id) { + p = &(*p)->rb_right; + } else { + pr_err("%s: can not insert multiple heaps with " + "id %d\n", __func__, heap->id); + goto end; + } + } + + rb_link_node(&heap->node, parent, p); + rb_insert_color(&heap->node, &dev->heaps); + debugfs_create_file(heap->name, 0664, dev->debug_root, heap, + &debug_heap_fops); +end: + mutex_unlock(&dev->lock); +} + +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)) +{ + struct ion_device *idev; + int ret; + + idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); + if (!idev) + return ERR_PTR(-ENOMEM); + + idev->dev.minor = MISC_DYNAMIC_MINOR; + idev->dev.name = "ion"; + idev->dev.fops = &ion_fops; + idev->dev.parent = NULL; + ret = misc_register(&idev->dev); + if (ret) { + pr_err("ion: failed to register misc device.\n"); + return ERR_PTR(ret); + } + + idev->debug_root = debugfs_create_dir("ion", NULL); + if (IS_ERR_OR_NULL(idev->debug_root)) + pr_err("ion: failed to create debug files.\n"); + + idev->custom_ioctl = custom_ioctl; + idev->buffers = RB_ROOT; + mutex_init(&idev->lock); + idev->heaps = RB_ROOT; + idev->user_clients = RB_ROOT; + idev->kernel_clients = RB_ROOT; + return idev; +} + +void ion_device_destroy(struct ion_device *dev) +{ + misc_deregister(&dev->dev); + /* XXX need to free the heaps and clients ? 
*/ + kfree(dev); +} diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c new file mode 100644 index 0000000..606adae --- /dev/null +++ b/drivers/gpu/ion/ion_carveout_heap.c @@ -0,0 +1,162 @@ +/* + * drivers/gpu/ion/ion_carveout_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/spinlock.h> + +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/ion.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion_priv.h" + +#include <asm/mach/map.h> + +struct ion_carveout_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + + if (!offset) + return ION_CARVEOUT_ALLOCATE_FAIL; + + return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) + return; + gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = buffer->priv_phys; + *len = buffer->size; + return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + buffer->priv_phys = ion_carveout_allocate(heap, size, align); + return buffer->priv_phys == ION_CARVEOUT_ALLOCATE_FAIL ? 
-ENOMEM : 0; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + + ion_carveout_free(heap, buffer->priv_phys, buffer->size); + buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL; +} + +struct scatterlist *ion_carveout_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return ERR_PTR(-EINVAL); +} + +void ion_carveout_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +void *ion_carveout_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return __arch_ioremap(buffer->priv_phys, buffer->size, + MT_MEMORY_NONCACHED); +} + +void ion_carveout_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + __arch_iounmap(buffer->vaddr); + buffer->vaddr = NULL; + return; +} + +int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + return remap_pfn_range(vma, vma->vm_start, + __phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff, + buffer->size, + pgprot_noncached(vma->vm_page_prot)); +} + +static struct ion_heap_ops carveout_heap_ops = { + .allocate = ion_carveout_heap_allocate, + .free = ion_carveout_heap_free, + .phys = ion_carveout_heap_phys, + .map_user = ion_carveout_heap_map_user, + .map_kernel = ion_carveout_heap_map_kernel, + .unmap_kernel = ion_carveout_heap_unmap_kernel, +}; + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_carveout_heap *carveout_heap; + + carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL); + if (!carveout_heap) + return ERR_PTR(-ENOMEM); + + carveout_heap->pool = gen_pool_create(12, -1); + if (!carveout_heap->pool) { + kfree(carveout_heap); + return ERR_PTR(-ENOMEM); + } + carveout_heap->base = heap_data->base; + gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size, + -1); + carveout_heap->heap.ops = &carveout_heap_ops; + carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT; + + return &carveout_heap->heap; +} + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + carveout_heap = NULL; +} diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c new file mode 100644 index 0000000..8ce3c19 --- /dev/null +++ b/drivers/gpu/ion/ion_heap.c @@ -0,0 +1,72 @@ +/* + * drivers/gpu/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/ion.h> +#include "ion_priv.h" + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %u\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h new file mode 100644 index 0000000..3323954 --- /dev/null +++ b/drivers/gpu/ion/ion_priv.h @@ -0,0 +1,184 @@ +/* + * drivers/gpu/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include <linux/kref.h> +#include <linux/mm_types.h> +#include <linux/mutex.h> +#include <linux/rbtree.h> +#include <linux/ion.h> + +struct ion_mapping; + +struct ion_dma_mapping { + struct kref ref; + struct scatterlist *sglist; +}; + +struct ion_kernel_mapping { + struct kref ref; + void *vaddr; +}; + +struct ion_buffer *ion_handle_buffer(struct ion_handle *handle); + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref: refernce count + * @node: node in the ion_device buffers tree + * @dev: back pointer to the ion_device + * @heap: back pointer to the heap the buffer came from + * @flags: buffer specific flags + * @size: size of the buffer + * @priv_virt: private data to the buffer representable as + * a void * + * @priv_phys: private data to the buffer representable as + * an ion_phys_addr_t (and someday a phys_addr_t) + * @lock: protects the buffers cnt fields + * @kmap_cnt: number of times the buffer is mapped to the kernel + * @vaddr: the kenrel mapping if kmap_cnt is not zero + * @dmap_cnt: number of times the buffer is mapped for dma + * @sglist: the scatterlist for the buffer is dmap_cnt is not zero +*/ +struct ion_buffer { + struct kref ref; + struct rb_node node; + struct ion_device *dev; + struct ion_heap *heap; + unsigned long flags; + size_t size; + union { + void *priv_virt; + ion_phys_addr_t priv_phys; + }; + struct mutex lock; + int kmap_cnt; + void *vaddr; + int dmap_cnt; + struct scatterlist *sglist; +}; + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate: allocate memory + * @free: free memory + * @phys get physical address of a buffer (only define on + * physically contiguous heaps) + * @map_dma map the memory for dma to a scatterlist + * @unmap_dma unmap the memory for dma + * @map_kernel map memory to the kernel + * @unmap_kernel unmap memory to the kernel + * @map_user map memory to userspace + */ +struct ion_heap_ops { + int (*allocate) (struct ion_heap *heap, + struct ion_buffer *buffer, unsigned long len, + unsigned long align, unsigned long flags); + void (*free) (struct ion_buffer *buffer); + int (*phys) (struct ion_heap *heap, struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len); + struct scatterlist *(*map_dma) (struct ion_heap *heap, + struct ion_buffer *buffer); + void (*unmap_dma) (struct ion_heap *heap, struct ion_buffer *buffer); + void * (*map_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); + void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer); + int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma); +}; + +/** + * struct ion_heap - represents a heap in the system + * @node: rb node to put the heap on the device's tree of heaps + * @dev: back pointer to the ion_device + * @type: type of heap + * @ops: ops struct as above + * @id: id of heap, also indicates priority of this heap when + * allocating. These are specified by platform data and + * MUST be unique + * @name: used for debugging + * + * Represents a pool of memory from which buffers can be made. In some + * systems the only heap is regular system memory allocated via vmalloc. + * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. 
+ */ +struct ion_heap { + struct rb_node node; + struct ion_device *dev; + enum ion_heap_type type; + struct ion_heap_ops *ops; + int id; + const char *name; +}; + +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl: arch specific ioctl function if applicable + * + * returns a valid device or -PTR_ERR + */ +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)); + +/** + * ion_device_destroy - free and device and it's resource + * @dev: the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev: the device + * @heap: the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * functions for creating and destroying the built in ion heaps. + * architectures can add their own custom architecture specific + * heaps as appropriate. + */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *); +void ion_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *); +void ion_system_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *); +void ion_system_contig_heap_destroy(struct ion_heap *); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *); +void ion_carveout_heap_destroy(struct ion_heap *); +/** + * kernel api to allocate/free from carveout -- used when carveout is + * used to back an architecture specific custom heap + */ +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size, + unsigned long align); +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size); +/** + * The carveout heap returns physical addresses, since 0 may be a valid + * physical address, this is used to indicate allocation failed + */ +#define ION_CARVEOUT_ALLOCATE_FAIL -1 + +#endif /* _ION_PRIV_H */ diff --git a/drivers/gpu/ion/ion_system_heap.c b/drivers/gpu/ion/ion_system_heap.c new file mode 100644 index 0000000..c046cf1 --- /dev/null +++ b/drivers/gpu/ion/ion_system_heap.c @@ -0,0 +1,198 @@ +/* + * drivers/gpu/ion/ion_system_heap.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/ion.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion_priv.h" + +static int ion_system_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + buffer->priv_virt = vmalloc_user(size); + if (!buffer->priv_virt) + return -ENOMEM; + return 0; +} + +void ion_system_heap_free(struct ion_buffer *buffer) +{ + vfree(buffer->priv_virt); +} + +struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + struct page *page; + int i; + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + void *vaddr = buffer->priv_virt; + + sglist = vmalloc(npages * sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + memset(sglist, 0, npages * sizeof(struct scatterlist)); + sg_init_table(sglist, npages); + for (i = 0; i < npages; i++) { + page = vmalloc_to_page(vaddr); + if (!page) + goto end; + sg_set_page(&sglist[i], page, PAGE_SIZE, 0); + vaddr += PAGE_SIZE; + } + /* XXX do cache maintenance for dma? */ + return sglist; +end: + vfree(sglist); + return NULL; +} + +void ion_system_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + /* XXX undo cache maintenance for dma? */ + if (buffer->sglist) + vfree(buffer->sglist); +} + +void *ion_system_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +void ion_system_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + return remap_vmalloc_range(vma, buffer->priv_virt, vma->vm_pgoff); +} + +static struct ion_heap_ops vmalloc_ops = { + .allocate = ion_system_heap_allocate, + .free = ion_system_heap_free, + .map_dma = ion_system_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_system_heap_map_kernel, + .unmap_kernel = ion_system_heap_unmap_kernel, + .map_user = ion_system_heap_map_user, +}; + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &vmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM; + return heap; +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + buffer->priv_virt = kzalloc(len, GFP_KERNEL); + if (!buffer->priv_virt) + return -ENOMEM; + return 0; +} + +void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ + kfree(buffer->priv_virt); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + *addr = virt_to_phys(buffer->priv_virt); + *len = buffer->size; + return 0; +} + +struct scatterlist *ion_system_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sglist; + + sglist = vmalloc(sizeof(struct scatterlist)); + if (!sglist) + return ERR_PTR(-ENOMEM); + sg_init_table(sglist, 1); + sg_set_page(sglist, virt_to_page(buffer->priv_virt), buffer->size, 0); + return sglist; +} + +int ion_system_contig_heap_map_user(struct ion_heap *heap, + struct ion_buffer 
*buffer, + struct vm_area_struct *vma) +{ + unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt)); + return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + +} + +static struct ion_heap_ops kmalloc_ops = { + .allocate = ion_system_contig_heap_allocate, + .free = ion_system_contig_heap_free, + .phys = ion_system_contig_heap_phys, + .map_dma = ion_system_contig_heap_map_dma, + .unmap_dma = ion_system_heap_unmap_dma, + .map_kernel = ion_system_heap_map_kernel, + .unmap_kernel = ion_system_heap_unmap_kernel, + .map_user = ion_system_contig_heap_map_user, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &kmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; + return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + diff --git a/drivers/gpu/ion/ion_system_mapper.c b/drivers/gpu/ion/ion_system_mapper.c new file mode 100644 index 0000000..692458e --- /dev/null +++ b/drivers/gpu/ion/ion_system_mapper.c @@ -0,0 +1,114 @@ +/* + * drivers/gpu/ion/ion_system_mapper.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/ion.h> +#include <linux/memory.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion_priv.h" +/* + * This mapper is valid for any heap that allocates memory that already has + * a kernel mapping, this includes vmalloc'd memory, kmalloc'd memory, + * pages obtained via io_remap, etc. + */ +static void *ion_kernel_mapper_map(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct ion_mapping **mapping) +{ + if (!((1 << buffer->heap->type) & mapper->heap_mask)) { + pr_err("%s: attempting to map an unsupported heap\n", __func__); + return ERR_PTR(-EINVAL); + } + /* XXX REVISIT ME!!! 
*/ + *((unsigned long *)mapping) = (unsigned long)buffer->priv; + return buffer->priv; +} + +static void ion_kernel_mapper_unmap(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct ion_mapping *mapping) +{ + if (!((1 << buffer->heap->type) & mapper->heap_mask)) + pr_err("%s: attempting to unmap an unsupported heap\n", + __func__); +} + +static void *ion_kernel_mapper_map_kernel(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct ion_mapping *mapping) +{ + if (!((1 << buffer->heap->type) & mapper->heap_mask)) { + pr_err("%s: attempting to unmap an unsupported heap\n", + __func__); + return ERR_PTR(-EINVAL); + } + return buffer->priv; +} + +static int ion_kernel_mapper_map_user(struct ion_mapper *mapper, + struct ion_buffer *buffer, + struct vm_area_struct *vma, + struct ion_mapping *mapping) +{ + int ret; + + switch (buffer->heap->type) { + case ION_HEAP_KMALLOC: + { + unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv)); + ret = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + break; + } + case ION_HEAP_VMALLOC: + ret = remap_vmalloc_range(vma, buffer->priv, vma->vm_pgoff); + break; + default: + pr_err("%s: attempting to map unsupported heap to userspace\n", + __func__); + return -EINVAL; + } + + return ret; +} + +static struct ion_mapper_ops ops = { + .map = ion_kernel_mapper_map, + .map_kernel = ion_kernel_mapper_map_kernel, + .map_user = ion_kernel_mapper_map_user, + .unmap = ion_kernel_mapper_unmap, +}; + +struct ion_mapper *ion_system_mapper_create(void) +{ + struct ion_mapper *mapper; + mapper = kzalloc(sizeof(struct ion_mapper), GFP_KERNEL); + if (!mapper) + return ERR_PTR(-ENOMEM); + mapper->type = ION_SYSTEM_MAPPER; + mapper->ops = &ops; + mapper->heap_mask = (1 << ION_HEAP_VMALLOC) | (1 << ION_HEAP_KMALLOC); + return mapper; +} + +void ion_system_mapper_destroy(struct ion_mapper *mapper) +{ + kfree(mapper); +} + diff --git a/drivers/gpu/ion/tegra/Makefile b/drivers/gpu/ion/tegra/Makefile new file mode 100644 index 0000000..11cd003 --- /dev/null +++ b/drivers/gpu/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/gpu/ion/tegra/tegra_ion.c b/drivers/gpu/ion/tegra/tegra_ion.c new file mode 100644 index 0000000..7af6e16 --- /dev/null +++ b/drivers/gpu/ion/tegra/tegra_ion.c @@ -0,0 +1,96 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "../ion_priv.h"
+
+struct ion_device *idev;
+struct ion_mapper *tegra_user_mapper;
+int num_heaps;
+struct ion_heap **heaps;
+
+int tegra_ion_probe(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = pdev->dev.platform_data;
+	int err;
+	int i;
+
+	num_heaps = pdata->nr;
+
+	heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+	idev = ion_device_create(NULL);
+	if (IS_ERR_OR_NULL(idev)) {
+		kfree(heaps);
+		return PTR_ERR(idev);
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		heaps[i] = ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			err = PTR_ERR(heaps[i]);
+			goto err;
+		}
+		ion_device_add_heap(idev, heaps[i]);
+	}
+	platform_set_drvdata(pdev, idev);
+	return 0;
+err:
+	for (i = 0; i < num_heaps; i++) {
+		if (heaps[i])
+			ion_heap_destroy(heaps[i]);
+	}
+	kfree(heaps);
+	return err;
+}
+
+int tegra_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	ion_device_destroy(idev);
+	for (i = 0; i < num_heaps; i++)
+		ion_heap_destroy(heaps[i]);
+	kfree(heaps);
+	return 0;
+}
+
+static struct platform_driver ion_driver = {
+	.probe = tegra_ion_probe,
+	.remove = tegra_ion_remove,
+	.driver = { .name = "ion-tegra" }
+};
+
+static int __init ion_init(void)
+{
+	return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+	platform_driver_unregister(&ion_driver);
+}
+
+module_init(ion_init);
+module_exit(ion_exit);
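
For context, the ioctl surface added in ion.c above (ION_IOC_ALLOC, ION_IOC_SHARE/ION_IOC_MAP, ION_IOC_FREE, then mmap() on the shared fd served by ion_share_mmap()) is the path userspace is expected to drive. The following is a minimal sketch of that flow, not part of this patch: it assumes the ION_IOC_* numbers and the ion_allocation_data / ion_fd_data / ion_handle_data structures from this kernel's <linux/ion.h> are visible to the userspace build (the header is not exported by this commit), that the misc device registered as "ion" shows up at /dev/ion, and that the flags heap-selection mask is platform specific; all sizes and values are illustrative only.

/* Hypothetical userspace sketch: alloc -> share -> mmap -> free.
 * Assumes ION_IOC_* and the ion_*_data structs from this kernel's
 * <linux/ion.h> are available to userspace (not exported by this patch). */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int main(void)
{
	struct ion_allocation_data alloc = {
		.len   = 4096,
		.align = 4096,
		.flags = 1,	/* heap selection mask; platform specific */
	};
	struct ion_fd_data share = { 0 };
	struct ion_handle_data rel = { 0 };
	void *p;
	int ret = 1;

	int fd = open("/dev/ion", O_RDWR);	/* misc device named "ion" */
	if (fd < 0)
		return 1;

	if (ioctl(fd, ION_IOC_ALLOC, &alloc) < 0)	/* fills alloc.handle */
		goto out_close;

	share.handle = alloc.handle;
	if (ioctl(fd, ION_IOC_SHARE, &share) < 0)	/* fills share.fd */
		goto out_free;

	/* this mapping is serviced by ion_share_mmap() in ion.c */
	p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		 share.fd, 0);
	if (p != MAP_FAILED) {
		munmap(p, alloc.len);
		ret = 0;
	}
	close(share.fd);

out_free:
	rel.handle = alloc.handle;
	ioctl(fd, ION_IOC_FREE, &rel);
out_close:
	close(fd);
	return ret;
}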