/* linux/drivers/char/exynos_mem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>	/* error codes */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <linux/exynos_mem.h>

#include <asm/cacheflush.h>
#include <plat/cpu.h>

#define L2_FLUSH_ALL	SZ_1M
#define L1_FLUSH_ALL	SZ_64K

struct exynos_mem {
	bool cacheable;
	unsigned int phybase;
};

int exynos_mem_open(struct inode *inode, struct file *filp)
{
	struct exynos_mem *prv_data;

	prv_data = kzalloc(sizeof(struct exynos_mem), GFP_KERNEL);
	if (!prv_data) {
		pr_err("%s: not enough memory\n", __func__);
		return -ENOMEM;
	}
	prv_data->cacheable = true;	/* Default: cacheable */

	filp->private_data = prv_data;

	printk(KERN_DEBUG "[%s:%d] private_data(0x%08x)\n",
		__func__, __LINE__, (u32)prv_data);

	return 0;
}

int exynos_mem_release(struct inode *inode, struct file *filp)
{
	printk(KERN_DEBUG "[%s:%d] private_data(0x%08x)\n",
		__func__, __LINE__, (u32)filp->private_data);

	kfree(filp->private_data);

	return 0;
}

enum cacheop { EM_CLEAN, EM_INV, EM_FLUSH };

static void cache_maint_inner(void *vaddr, size_t size, enum cacheop op)
{
	switch (op) {
	case EM_CLEAN:
		dmac_map_area(vaddr, size, DMA_TO_DEVICE);
		break;
	case EM_INV:
		dmac_unmap_area(vaddr, size, DMA_TO_DEVICE);
		break;
	case EM_FLUSH:
		dmac_flush_range(vaddr, vaddr + size);
	}
}

/*
 * Inner (L1) maintenance is done on virtual addresses page by page, then the
 * outer (L2) cache is maintained on the physical range.  For ranges larger
 * than L1_FLUSH_ALL it is cheaper to flush the whole L1 on every CPU and go
 * straight to the outer-cache operations.
 */
static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
{
	size_t left = length;
	phys_addr_t begin = start;

	if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
		if (length > (size_t) L1_FLUSH_ALL) {
			flush_cache_all();
			smp_call_function(
				(smp_call_func_t)__cpuc_flush_kern_all,
				NULL, 1);

			goto outer_cache_ops;
		}
	}

#ifdef CONFIG_HIGHMEM
	do {
		size_t len;
		struct page *page;
		void *vaddr;
		off_t offset;

		page = phys_to_page(start);
		offset = offset_in_page(start);
		len = PAGE_SIZE - offset;

		if (left < len)
			len = left;

		if (PageHighMem(page)) {
			vaddr = kmap(page);
			cache_maint_inner(vaddr + offset, len, op);
			kunmap(page);
		} else {
			vaddr = page_address(page) + offset;
			cache_maint_inner(vaddr, len, op);
		}

		left -= len;
		start += len;
	} while (left);
#else
	cache_maint_inner(phys_to_virt(begin), left, op);
#endif

outer_cache_ops:
	switch (op) {
	case EM_CLEAN:
		outer_clean_range(begin, begin + length);
		break;
	case EM_INV:
		if (length <= L2_FLUSH_ALL) {
			outer_inv_range(begin, begin + length);
			break;
		}
		/* else FALL THROUGH */
	case EM_FLUSH:
		outer_flush_range(begin, begin + length);
		break;
	}
}

static void exynos_mem_paddr_cache_clean(dma_addr_t start, size_t length)
{
	if (length > (size_t) L2_FLUSH_ALL) {
		flush_cache_all();		/* L1 */
		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all,
					NULL, 1);
		outer_clean_all();		/* L2 */
	} else if (length > (size_t) L1_FLUSH_ALL) {
		dma_addr_t end = start + length - 1;

		flush_cache_all();		/* L1 */
		smp_call_function((smp_call_func_t)__cpuc_flush_kern_all,
					NULL, 1);
		outer_clean_range(start, end);	/* L2 */
	} else {
		dma_addr_t end = start + length - 1;

		dmac_flush_range(phys_to_virt(start), phys_to_virt(end));
		outer_clean_range(start, end);	/* L2 */
	}
}
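/*
 * ioctl interface of the exynos-mem device.  EXYNOS_MEM_SET_CACHEABLE
 * selects whether subsequent mmap()s are cacheable, EXYNOS_MEM_SET_PHYADDR
 * stores a default physical base used when mmap() is called with no offset,
 * and EXYNOS_MEM_PADDR_CACHE_FLUSH / EXYNOS_MEM_PADDR_CACHE_CLEAN run the
 * cache maintenance helpers above over a user-supplied physical range.
 */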
long exynos_mem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXYNOS_MEM_SET_CACHEABLE:
	{
		struct exynos_mem *mem = filp->private_data;
		int cacheable;

		if (get_user(cacheable, (u32 __user *)arg)) {
			pr_err("[%s:%d] err: EXYNOS_MEM_SET_CACHEABLE\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		mem->cacheable = cacheable;
		break;
	}
	case EXYNOS_MEM_PADDR_CACHE_FLUSH:
	{
		struct exynos_mem_flush_range range;

		if (copy_from_user(&range,
				   (struct exynos_mem_flush_range __user *)arg,
				   sizeof(range))) {
			pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_FLUSH\n",
				__func__, __LINE__);
			return -EFAULT;
		}

		cache_maint_phys(range.start, range.length, EM_FLUSH);
		break;
	}
	case EXYNOS_MEM_PADDR_CACHE_CLEAN:
	{
		struct exynos_mem_flush_range range;

		if (copy_from_user(&range,
				   (struct exynos_mem_flush_range __user *)arg,
				   sizeof(range))) {
			pr_err("[%s:%d] err: EXYNOS_MEM_PADDR_CACHE_CLEAN\n",
				__func__, __LINE__);
			return -EFAULT;
		}

		cache_maint_phys(range.start, range.length, EM_CLEAN);
		break;
	}
	case EXYNOS_MEM_SET_PHYADDR:
	{
		struct exynos_mem *mem = filp->private_data;
		int phyaddr;

		if (get_user(phyaddr, (u32 __user *)arg)) {
			pr_err("[%s:%d] err: EXYNOS_MEM_SET_PHYADDR\n",
				__func__, __LINE__);
			return -EFAULT;
		}
		mem->phybase = phyaddr >> PAGE_SHIFT;
		break;
	}
	default:
		pr_err("[%s:%d] error command\n", __func__, __LINE__);
		return -EINVAL;
	}

	return 0;
}

static void exynos_mem_mmap_open(struct vm_area_struct *vma)
{
	printk(KERN_DEBUG "[%s] addr(0x%08x)\n", __func__, (u32)vma->vm_start);
}

static void exynos_mem_mmap_close(struct vm_area_struct *vma)
{
	printk(KERN_DEBUG "[%s] addr(0x%08x)\n", __func__, (u32)vma->vm_start);
}

static struct vm_operations_struct exynos_mem_ops = {
	.open	= exynos_mem_mmap_open,
	.close	= exynos_mem_mmap_close,
};

/* Table of CMA regions that userspace may potentially be allowed to map. */
static struct simple_cma_descriptor cmad_container[CMA_REGION_COUNT];
static int cmad_container_stored = 0;

void cma_region_descriptor_add(const char *name, int start, int size)
{
	int i;

	pr_info("[%s] adding [%s] (0x%08x)-(0x%08x)\n",
		__func__, name, start, size);

	/* Drop the request if the descriptor table is already full. */
	if (cmad_container_stored >= CMA_REGION_COUNT)
		return;

	i = cmad_container_stored;
	cmad_container[i].name = name;
	cmad_container[i].start = start;
	cmad_container[i].size = size;
	cmad_container_stored++;
}
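/*
 * mmap() handler.  The requested physical range (taken from the mmap offset,
 * or from the base set via EXYNOS_MEM_SET_PHYADDR) is only remapped into
 * userspace if it falls entirely inside one of the registered CMA regions
 * that are explicitly whitelisted below.
 */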
int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_mem *mem = (struct exynos_mem *)filp->private_data;
	bool cacheable = mem->cacheable;
	dma_addr_t start = 0;
	u32 pfn = 0;
	u32 size = vma->vm_end - vma->vm_start;
	int i, allowed;
	struct simple_cma_descriptor *b;

	allowed = false;

	if (vma->vm_pgoff) {
		start = vma->vm_pgoff << PAGE_SHIFT;
		pfn = vma->vm_pgoff;
	} else {
		start = mem->phybase << PAGE_SHIFT;
		pfn = mem->phybase;
	}

	pr_info("[%s] requesting access to (0x%08x)-(0x%08x)\n",
		__func__, start, (start + size));

	b = (struct simple_cma_descriptor *)&cmad_container;

	/* Go over all of the defined CMA blocks */
	for (i = 0; i < cmad_container_stored; i++) {
		pr_info("[%s] Checking space paddr(0x%08x)-(0x%08x) from '%s'\n",
			__func__, b->start, (b->start + b->size), b->name);

		/* Check if the requested space is within this current CMA block */
		if (start >= b->start && (start + size) <= (b->start + b->size)) {
			/* Further only conditionally whitelist spaces that we know
			 * break device functionality if we don't allow access.
			 *
			 * Add exceptions as we go.
			 */
			if (strcmp(b->name, "s3c-fimc") == 0 ||
			    strcmp(b->name, "fimc1") == 0) {
				allowed = true;
				pr_info("[%s] Accessing space 0x%08x/0x%08x for '%s'\n",
					__func__, b->start, b->size, b->name);
			}
		}
		b++;
	}

	if (!allowed) {
		/* The requested memory space isn't in any CMA block, deny access */
		pr_err("[%s] invalid paddr(0x%08x)-(0x%08x), accessing outside of DMA spaces\n",
			__func__, start, (start + size));
		return -EINVAL;
	}

	/* The check below doesn't matter anymore */
	/* TODO: currently lowmem is only available */
	if ((phys_to_virt(start) < (void *)PAGE_OFFSET) ||
	    (phys_to_virt(start) >= high_memory)) {
		pr_err("[%s] invalid paddr(0x%08x)\n", __func__, start);
		return -EINVAL;
	}

	if (!cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &exynos_mem_ops;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		pr_err("writable mapping must be shared\n");
		return -EINVAL;
	}

	if (remap_pfn_range(vma, vma->vm_start, pfn, size,
			    vma->vm_page_prot)) {
		pr_err("mmap fail\n");
		return -EINVAL;
	}

	vma->vm_ops->open(vma);

	return 0;
}
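
/*
 * Example (userspace side): a minimal sketch of how a client might drive this
 * driver, assuming the character device is exposed as /dev/exynos-mem and that
 * the EXYNOS_MEM_* ioctl numbers are visible to userspace via
 * <linux/exynos_mem.h>.  The device path, physical address and length below
 * are illustrative placeholders only, not values taken from this file; the
 * sketch is kept out of the kernel build under "#if 0".
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/exynos_mem.h>

static int exynos_mem_example(uint32_t paddr, size_t len)
{
	uint32_t cacheable = 0;
	void *va;
	int fd, ret = -1;

	fd = open("/dev/exynos-mem", O_RDWR | O_SYNC);	/* assumed node path */
	if (fd < 0)
		return -1;

	/* Ask for a non-cacheable mapping (EXYNOS_MEM_SET_CACHEABLE above). */
	if (ioctl(fd, EXYNOS_MEM_SET_CACHEABLE, &cacheable) < 0)
		goto out;

	/*
	 * The mmap offset carries the physical address; exynos_mem_mmap()
	 * above rejects ranges outside the whitelisted CMA regions.
	 */
	va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, paddr);
	if (va == MAP_FAILED)
		goto out;

	/* ... use the buffer ... */

	munmap(va, len);
	ret = 0;
out:
	close(fd);
	return ret;
}
#endif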