-rw-r--r--  arch/arm/mach-exynos/include/mach/secmem.h                   11
-rw-r--r--  arch/arm/mach-exynos/mach-midas.c                            25
-rw-r--r--  arch/arm/mach-exynos/secmem-allocdev.c                      140
-rw-r--r--  drivers/char/Kconfig                                          1
-rw-r--r--  drivers/char/exynos_mem.c                                    14
-rw-r--r--  drivers/char/s3c_mem.c                                       17
-rw-r--r--  drivers/media/video/samsung/fimc/fimc_output.c              145
-rw-r--r--  drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c   12
-rw-r--r--  drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c    76
-rw-r--r--  fs/fuse/file.c                                               36
-rw-r--r--  include/linux/cma.h                                           6
-rw-r--r--  include/linux/highmem.h                                       7
-rw-r--r--  include/linux/migrate.h                                       8
-rw-r--r--  include/linux/mm.h                                           12
-rw-r--r--  include/linux/pagemap.h                                       4
-rw-r--r--  mm/cma.c                                                     12
-rw-r--r--  mm/memory.c                                                 110
-rw-r--r--  mm/migrate-cma.c                                             70
18 files changed, 685 insertions, 21 deletions
diff --git a/arch/arm/mach-exynos/include/mach/secmem.h b/arch/arm/mach-exynos/include/mach/secmem.h
index dfa86a4..9adcdf8 100644
--- a/arch/arm/mach-exynos/include/mach/secmem.h
+++ b/arch/arm/mach-exynos/include/mach/secmem.h
@@ -24,6 +24,17 @@ struct secchunk_info {
size_t size;
};
+struct secmem_fd_info {
+ uint32_t phys_addr;
+ size_t size;
+};
+
+struct secmem_fd_list {
+ struct secmem_fd_list *next;
+ struct secmem_fd_list *prev;
+ struct secmem_fd_info fdinfo;
+};
+
extern struct miscdevice secmem;
#if defined(CONFIG_ION)
struct secfd_info {
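The prev/next bookkeeping introduced above is a hand-rolled doubly-linked list; the kernel already ships the same primitives in <linux/list.h>. A minimal sketch of the equivalent structure, assuming the same fdinfo payload (an illustration, not part of the patch):

#include <linux/list.h>

struct secmem_fd_node {
        struct list_head list;          /* replaces the open-coded next/prev */
        struct secmem_fd_info fdinfo;
};

static LIST_HEAD(secmem_fd_head);       /* replaces g_fd_head */

/* add:  list_add(&node->list, &secmem_fd_head);
 * del:  list_del(&node->list); kfree(node);
 * find: list_for_each_entry(pos, &secmem_fd_head, list) { ... }
 */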
diff --git a/arch/arm/mach-exynos/mach-midas.c b/arch/arm/mach-exynos/mach-midas.c
index fc6c224..bf8776e 100644
--- a/arch/arm/mach-exynos/mach-midas.c
+++ b/arch/arm/mach-exynos/mach-midas.c
@@ -3423,11 +3423,36 @@ static void __init exynos4_reserve(void)
CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC1 * SZ_1K, 0x65800000, 0);
if (ret != 0)
panic("alloc failed for FIMC1\n");
+ else {
+ static struct cma_region fimc_reg = {
+ .name = "fimc1",
+ .size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC1 * SZ_1K,
+ .start = 0x65800000,
+ .reserved = 1,
+ };
+
+ if (cma_early_region_register(&fimc_reg))
+ pr_err("S5P/CMA: Failed to register '%s'\n",
+ fimc_reg.name);
+ }
#endif
#if defined(CONFIG_USE_MFC_CMA) && defined(CONFIG_MACH_M0)
ret = dma_declare_contiguous(&s5p_device_mfc.dev,
0x02800000, 0x5C800000, 0);
+
+ if (ret == 0) {
+ static struct cma_region mfc_reg = {
+ .name = "mfc",
+ .size = 0x02800000,
+ .start = 0x5C800000,
+ .reserved = 1,
+ };
+
+ if (cma_early_region_register(&mfc_reg))
+ pr_err("S5P/CMA: Failed to register '%s'\n",
+ mfc_reg.name);
+ }
#endif
if (ret != 0)
printk(KERN_ERR "%s Fail\n", __func__);
diff --git a/arch/arm/mach-exynos/secmem-allocdev.c b/arch/arm/mach-exynos/secmem-allocdev.c
index cab2bc5..29bd7da 100644
--- a/arch/arm/mach-exynos/secmem-allocdev.c
+++ b/arch/arm/mach-exynos/secmem-allocdev.c
@@ -34,6 +34,8 @@
struct miscdevice secmem;
struct secmem_crypto_driver_ftn *crypto_driver;
+struct secmem_fd_list g_fd_head;
+
#if defined(CONFIG_ION)
extern struct ion_device *ion_exynos;
#endif
@@ -63,9 +65,100 @@ static bool drm_onoff = false;
#define SECMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+
+static void secmem_fd_list_init(struct secmem_fd_list *list)
+{
+ list->next = list;
+ list->prev = list;
+ list->fdinfo.phys_addr = 0;
+ list->fdinfo.size = 0;
+}
+
+static void secmem_fd_list_clear(struct secmem_fd_list *head)
+{
+ head->next = head;
+ head->prev = head;
+}
+
+static void secmem_fd_list_add(struct secmem_fd_list *new, struct secmem_fd_list *head)
+{
+ head->next->prev = new;
+ new->next = head->next;
+ new->prev = head;
+ head->next = new;
+}
+
+static void secmem_fd_list_del(struct secmem_fd_list *list)
+{
+ list->prev->next = list->next;
+ list->next->prev = list->prev;
+}
+
+static void init_secmem_fd_list(void)
+{
+ secmem_fd_list_init(&g_fd_head);
+}
+
+static void clear_secmem_fd_list(void)
+{
+ secmem_fd_list_clear(&g_fd_head);
+}
+
+static struct secmem_fd_list *secmem_fd_list_find(struct secmem_fd_list *head, uint32_t phys_addr, size_t size)
+{
+ struct secmem_fd_list *pos;
+
+ for (pos = head->next; pos != head; pos = pos->next) {
+ if ((pos->fdinfo.phys_addr == phys_addr) &&
+ (pos->fdinfo.size >= size))
+ return pos;
+ }
+
+ return NULL;
+}
+
+static int find_secmem_fd_list(struct secmem_fd_list *head, uint32_t phys_addr, size_t size)
+{
+ struct secmem_fd_list *fd_ent = NULL;
+
+ fd_ent = secmem_fd_list_find(head, phys_addr, size);
+ if (fd_ent == NULL)
+ return -1;
+
+ return 0;
+}
+
+static void put_secmem_fd_list(struct secmem_fd_info *secmem_fd)
+{
+ struct secmem_fd_list *new = NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return;
+
+ new->fdinfo.phys_addr = secmem_fd->phys_addr;
+ new->fdinfo.size = secmem_fd->size;
+
+ secmem_fd_list_add(new, &g_fd_head);
+}
+
+static int del_secmem_fd_list(struct secmem_region *region)
+{
+ struct secmem_fd_list *fd_ent = NULL;
+
+ fd_ent = secmem_fd_list_find(&g_fd_head, region->phys_addr, region->len);
+ if (fd_ent == NULL)
+ return -1;
+
+ secmem_fd_list_del(fd_ent);
+ kfree(fd_ent);
+
+ return 0;
+}
+
static int secmem_mmap(struct file *file, struct vm_area_struct *vma)
{
+ int ret;
unsigned long size = vma->vm_end - vma->vm_start;
+ uint32_t phys_addr = vma->vm_pgoff << PAGE_SHIFT;
BUG_ON(!SECMEM_IS_PAGE_ALIGNED(vma->vm_start));
BUG_ON(!SECMEM_IS_PAGE_ALIGNED(vma->vm_end));
@@ -73,6 +166,12 @@ static int secmem_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ ret = find_secmem_fd_list(&g_fd_head, phys_addr, size);
+ if (ret < 0) {
+ printk(KERN_ERR "%s : Fail mmap due to Invalid address\n", __func__);
+ return -EAGAIN;
+ }
+
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
size, vma->vm_page_prot)) {
printk(KERN_ERR "%s : remap_pfn_range() failed!\n", __func__);
@@ -213,6 +312,7 @@ static long secmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case SECMEM_IOC_GET_ADDR:
{
struct secmem_region region;
+ struct secmem_fd_info secmem_fd;
if (copy_from_user(&region, (void __user *)arg,
sizeof(struct secmem_region)))
@@ -224,12 +324,29 @@ static long secmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
pr_info("SECMEM_IOC_GET_ADDR: size:%lu\n", region.len);
-
+#ifndef CONFIG_DMA_CMA
+ region.virt_addr = kmalloc(region.len, GFP_KERNEL | GFP_DMA);
+#else
region.virt_addr = dma_alloc_coherent(NULL, region.len,
&region.phys_addr, GFP_KERNEL);
- if (!region.virt_addr)
- panic("SECMEM_IOC_GET_ADDR: dma_alloc_coherent failed! "
- "size=%lu\n", region.len);
+#endif
+ if (!region.virt_addr) {
+ printk(KERN_ERR "%s: Get memory address failed. "
+ " [size : %ld]\n", __func__, region.len);
+ return -EFAULT;
+ }
+
+#ifndef CONFIG_DMA_CMA
+ region.phys_addr = virt_to_phys(region.virt_addr);
+
+ dma_map_single(secmem.this_device, region.virt_addr,
+ region.len, DMA_TO_DEVICE);
+#endif
+
+ secmem_fd.phys_addr = region.phys_addr;
+ secmem_fd.size = region.len;
+
+ put_secmem_fd_list(&secmem_fd);
if (copy_to_user((void __user *)arg, &region,
sizeof(struct secmem_region)))
@@ -250,8 +367,17 @@ static long secmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
pr_info("SECMEM_IOC_RELEASE_ADDR: size:%lu\n", region.len);
+ if (del_secmem_fd_list(&region) < 0) {
+ printk(KERN_ERR "%s: Release memory failed.\n", __func__);
+ return -EFAULT;
+ }
+
+#ifndef CONFIG_DMA_CMA
+ kfree(region.virt_addr);
+#else
dma_free_coherent(NULL, region.len, region.virt_addr,
region.phys_addr);
+#endif
break;
}
@@ -292,9 +418,6 @@ static long secmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
minfo.base = info.lower_bound;
minfo.size = info.total_size;
- printk("[minfo base] : 0x%x", minfo.base);
- printk("[minfo size] : 0x%x", minfo.size);
-
if (copy_to_user((void __user *)arg, &minfo, sizeof(minfo)))
return -EFAULT;
break;
@@ -337,6 +460,8 @@ static int __init secmem_init(void)
crypto_driver = NULL;
+ init_secmem_fd_list();
+
pm_runtime_enable(secmem.this_device);
return 0;
@@ -345,6 +470,7 @@ static int __init secmem_init(void)
static void __exit secmem_exit(void)
{
__pm_runtime_disable(secmem.this_device, false);
+ clear_secmem_fd_list();
misc_deregister(&secmem);
}
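Taken together, the fd list turns SECMEM_IOC_GET_ADDR into the only way to obtain a mappable range: secmem_mmap() now refuses anything that was not handed out earlier. A hypothetical userspace flow (the device node name and the elided error handling are assumptions, not shown in the patch):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *secmem_map(size_t len, struct secmem_region *region)
{
        int fd = open("/dev/secmem", O_RDWR);           /* assumed node name */
        void *p;

        region->len = len;
        ioctl(fd, SECMEM_IOC_GET_ADDR, region);         /* allocates + records range */
        p = mmap(NULL, region->len, PROT_READ | PROT_WRITE,
                 MAP_SHARED, fd, region->phys_addr);    /* passes find_secmem_fd_list() */
        return p;       /* later: munmap(), then SECMEM_IOC_RELEASE_ADDR */
}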
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ea75975..ed0ddff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -638,6 +638,7 @@ config S3C_MEM
config EXYNOS_MEM
bool "Support for /dev/exynos-mem"
+ depends on CMA
default y
help
If you do say Y here, you can mmap using physically linear memories.
diff --git a/drivers/char/exynos_mem.c b/drivers/char/exynos_mem.c
index 85c7a29..cf546a1 100644
--- a/drivers/char/exynos_mem.c
+++ b/drivers/char/exynos_mem.c
@@ -18,6 +18,7 @@
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
+#include <linux/cma.h>
#include <asm/cacheflush.h>
#include <plat/cpu.h>
@@ -83,6 +84,12 @@ static void cache_maint_phys(phys_addr_t start, size_t length, enum cacheop op)
size_t left = length;
phys_addr_t begin = start;
+ if (!cma_is_registered_region(start, length)) {
+ pr_err("[%s] handling non-cma region (%#x@%#x) is prohibited\n",
+ __func__, length, start);
+ return;
+ }
+
if (!soc_is_exynos5250() && !soc_is_exynos5210()) {
if (length > (size_t) L1_FLUSH_ALL) {
flush_cache_all();
@@ -257,10 +264,9 @@ int exynos_mem_mmap(struct file *filp, struct vm_area_struct *vma)
pfn = mem->phybase;
}
- /* TODO: currently lowmem is only avaiable */
- if ((phys_to_virt(start) < (void *)PAGE_OFFSET) ||
- (phys_to_virt(start) >= high_memory)) {
- pr_err("[%s] invalid paddr(0x%08x)\n", __func__, start);
+ if (!cma_is_registered_region(start, size)) {
+ pr_err("[%s] handling non-cma region (%#x@%#x) is prohibited\n",
+ __func__, size, start);
return -EINVAL;
}
diff --git a/drivers/char/s3c_mem.c b/drivers/char/s3c_mem.c
index 3e09dda..a8546d0 100644
--- a/drivers/char/s3c_mem.c
+++ b/drivers/char/s3c_mem.c
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/mman.h>
#include <linux/dma-mapping.h>
+#include <linux/cma.h>
#include <linux/unistd.h>
#include <linux/version.h>
@@ -324,6 +325,14 @@ long s3c_mem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_unlock(&mem_share_alloc_lock);
return -EFAULT;
}
+
+ if (!cma_is_registered_region(param.phy_addr, param.size)) {
+ pr_err("%s: %#x@%#x is allowed to map\n",
+ __func__, param.size, param.phy_addr);
+ mutex_unlock(&mem_cacheable_share_alloc_lock);
+ return -EINVAL;
+ }
+
flag = MEM_ALLOC_SHARE;
physical_address = param.phy_addr;
DEBUG("param.phy_addr = %08x, %d\n",
@@ -361,6 +370,14 @@ long s3c_mem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_unlock(&mem_cacheable_share_alloc_lock);
return -EFAULT;
}
+
+ if (!cma_is_registered_region(param.phy_addr, param.size)) {
+ pr_err("%s: %#x@%#x is allowed to map\n",
+ __func__, param.size, param.phy_addr);
+ mutex_unlock(&mem_cacheable_share_alloc_lock);
+ return -EINVAL;
+ }
+
flag = MEM_ALLOC_CACHEABLE_SHARE;
physical_address = param.phy_addr;
DEBUG("param.phy_addr = %08x, %d\n",
diff --git a/drivers/media/video/samsung/fimc/fimc_output.c b/drivers/media/video/samsung/fimc/fimc_output.c
index 57611e5..da651c5 100644
--- a/drivers/media/video/samsung/fimc/fimc_output.c
+++ b/drivers/media/video/samsung/fimc/fimc_output.c
@@ -2726,13 +2726,21 @@ static int fimc_update_in_queue_addr(struct fimc_control *ctrl,
int fimc_qbuf_output(void *fh, struct v4l2_buffer *b)
{
- struct fimc_buf *buf;
+ struct fimc_buf *buf = (struct fimc_buf *)b->m.userptr;
struct fimc_ctx *ctx;
struct fimc_control *ctrl = ((struct fimc_prv_data *)fh)->ctrl;
int ctx_id = ((struct fimc_prv_data *)fh)->ctx_id;
int idx, ctx_num;
int ret = -1;
+ u32 width;
+ u32 height;
+ u32 format;
+ u32 y_size = 0;
+ u32 cb_size = 0;
+ u32 cr_size = 0;
+ u32 size;
+
ctx = &ctrl->out->ctx[ctx_id];
fimc_info2("ctx(%d) queued idx = %d\n", ctx->ctx_num, b->index);
if (ctx->status == FIMC_STREAMOFF) {
@@ -2758,6 +2766,69 @@ int fimc_qbuf_output(void *fh, struct v4l2_buffer *b)
return -EINVAL;
}
+ /* Check input buffer for CMA region */
+ width = ctx->pix.width;
+ height = ctx->pix.height;
+ format = ctx->pix.pixelformat;
+ y_size = width * height;
+
+ switch (format) {
+ case V4L2_PIX_FMT_RGB32:
+ y_size = y_size << 2;
+ size = PAGE_ALIGN(y_size);
+ break;
+ case V4L2_PIX_FMT_RGB565: /* fall through */
+ case V4L2_PIX_FMT_UYVY: /* fall through */
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ y_size = y_size << 1;
+ size = PAGE_ALIGN(y_size);
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_NV12M:
+ cb_size = y_size >> 2;
+ cr_size = y_size >> 2;
+ size = PAGE_ALIGN(y_size + cb_size + cr_size);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ cb_size = y_size >> 1;
+ size = PAGE_ALIGN(y_size + cb_size);
+ break;
+ case V4L2_PIX_FMT_NV12T:
+ fimc_get_nv12t_size(width, height, &y_size, &cb_size);
+ size = PAGE_ALIGN(y_size + cb_size);
+ break;
+ default:
+ fimc_err("%s: Invalid pixelformt : %d\n", __func__, format);
+ return -EINVAL;
+ }
+
+ if (buf->base[FIMC_ADDR_Y] != 0 && y_size != 0 &&
+ !cma_is_registered_region(buf->base[FIMC_ADDR_Y], y_size)) {
+ fimc_err("%s: Y address is not CMA region 0x%x, %d \n",
+ __func__, buf->base[FIMC_ADDR_Y], y_size);
+ return -EINVAL;
+ }
+ if (buf->base[FIMC_ADDR_CB] != 0 && cb_size != 0 &&
+ !cma_is_registered_region(buf->base[FIMC_ADDR_CB], cb_size)) {
+ fimc_err("%s: CB address is not CMA region 0x%x, %d \n",
+ __func__, buf->base[FIMC_ADDR_CB], cb_size);
+ return -EINVAL;
+ }
+ if (buf->base[FIMC_ADDR_CR] != 0 && cr_size != 0 &&
+ !cma_is_registered_region(buf->base[FIMC_ADDR_CR], cr_size)) {
+ fimc_err("%s: CR address is not CMA region 0x%x, %d \n",
+ __func__, buf->base[FIMC_ADDR_CR], cr_size);
+ return -EINVAL;
+ }
+ /* End check CMA region */
+
if ((ctrl->status == FIMC_READY_ON) ||
(ctrl->status == FIMC_STREAMON) ||
(ctrl->status == FIMC_STREAMON_IDLE)) {
@@ -2850,6 +2921,78 @@ int fimc_qbuf_output(void *fh, struct v4l2_buffer *b)
}
}
+ /* Check output buffer for CMA region */
+ width = ctx->fbuf.fmt.width;
+ height = ctx->fbuf.fmt.height;
+ format = ctx->fbuf.fmt.pixelformat;
+ y_size = width * height;
+ cb_size = 0;
+ cr_size = 0;
+
+ switch (format) {
+ case V4L2_PIX_FMT_RGB32:
+ y_size = y_size << 2;
+ size = PAGE_ALIGN(y_size);
+ break;
+ case V4L2_PIX_FMT_RGB565: /* fall through */
+ case V4L2_PIX_FMT_UYVY: /* fall through */
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_VYUY:
+ y_size = y_size << 1;
+ size = PAGE_ALIGN(y_size);
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_NV12M:
+ cb_size = y_size >> 2;
+ cr_size = y_size >> 2;
+ size = PAGE_ALIGN(y_size + cb_size + cr_size);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ cb_size = y_size >> 1;
+ size = PAGE_ALIGN(y_size + cb_size);
+ break;
+ case V4L2_PIX_FMT_NV12T:
+ fimc_get_nv12t_size(width, height, &y_size, &cb_size);
+ size = PAGE_ALIGN(y_size + cb_size);
+ break;
+ default:
+ fimc_err("%s: Invalid pixelformt : %d\n", __func__, format);
+ ret = -EINVAL;
+ goto err_routine;
+ }
+
+ if (ctx->dst[idx].base[FIMC_ADDR_Y] != 0 && y_size != 0 &&
+ !cma_is_registered_region((dma_addr_t)ctx->dst[idx].base[FIMC_ADDR_Y],
+ y_size)) {
+ fimc_err("%s: Y address is not CMA region 0x%x, %d \n",
+ __func__, ctx->dst[idx].base[FIMC_ADDR_Y], y_size);
+ ret = -EINVAL;
+ goto err_routine;
+ }
+ if (ctx->dst[idx].base[FIMC_ADDR_CB] != 0 && cb_size != 0 &&
+ !cma_is_registered_region((dma_addr_t)ctx->dst[idx].base[FIMC_ADDR_CB],
+ cb_size)) {
+ fimc_err("%s: CB address is not CMA region 0x%x, %d \n",
+ __func__, ctx->dst[idx].base[FIMC_ADDR_CB], cb_size);
+ ret = -EINVAL;
+ goto err_routine;
+ }
+ if (ctx->dst[idx].base[FIMC_ADDR_CR] != 0 && cr_size != 0 &&
+ !cma_is_registered_region((dma_addr_t)ctx->dst[idx].base[FIMC_ADDR_CR],
+ cr_size)) {
+ fimc_err("%s: CR address is not CMA region 0x%x, %d \n",
+ __func__, ctx->dst[idx].base[FIMC_ADDR_CR], cr_size);
+ ret = -EINVAL;
+ goto err_routine;
+ }
+ /* End check CMA region */
+
switch (ctx->overlay.mode) {
case FIMC_OVLY_FIFO:
ret = fimc_qbuf_output_fifo(ctrl, ctx, idx);
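The per-format size computation above is duplicated verbatim for the input and the output buffer. A hypothetical helper both call sites could share (fimc_plane_sizes() is an illustration, not part of the patch; it mirrors the patch's own per-format math, including its grouping of YUV422P with the 4:2:0 formats):

static int fimc_plane_sizes(u32 format, u32 width, u32 height,
                            u32 *y_size, u32 *cb_size, u32 *cr_size)
{
        u32 y = width * height, cb = 0, cr = 0;

        switch (format) {
        case V4L2_PIX_FMT_RGB32:
                y <<= 2;                        /* 32bpp, single plane */
                break;
        case V4L2_PIX_FMT_RGB565:
        case V4L2_PIX_FMT_UYVY:
        case V4L2_PIX_FMT_YUYV:
        case V4L2_PIX_FMT_YVYU:
        case V4L2_PIX_FMT_VYUY:
                y <<= 1;                        /* 16bpp, single plane */
                break;
        case V4L2_PIX_FMT_YUV420:
        case V4L2_PIX_FMT_YVU420:
        case V4L2_PIX_FMT_YUV422P:
        case V4L2_PIX_FMT_NV12M:
                cb = cr = y >> 2;               /* two quarter-size chroma planes */
                break;
        case V4L2_PIX_FMT_NV12:
        case V4L2_PIX_FMT_NV21:
        case V4L2_PIX_FMT_NV16:
        case V4L2_PIX_FMT_NV61:
                cb = y >> 1;                    /* one interleaved chroma plane */
                break;
        case V4L2_PIX_FMT_NV12T:
                fimc_get_nv12t_size(width, height, &y, &cb);
                break;
        default:
                return -EINVAL;
        }

        *y_size = y;
        *cb_size = cb;
        *cr_size = cr;
        return 0;       /* caller: size = PAGE_ALIGN(y + cb + cr); */
}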
diff --git a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c
index 40508d5..6fa0e31 100644
--- a/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c
+++ b/drivers/media/video/samsung/fimg2d3x-exynos4/fimg2d_core.c
@@ -178,6 +178,18 @@ int g2d_do_blit(struct g2d_global *g2d_dev, g2d_params *params)
}
if (params->flag.memory_type == G2D_MEMORY_KERNEL) {
+#if defined(CONFIG_S5P_MEM_CMA)
+ if (!cma_is_registered_region((unsigned int)params->src_rect.addr,
+ GET_RECT_SIZE(params->src_rect))) {
+ printk(KERN_ERR "[%s] SRC Surface is not included in CMA region\n", __func__);
+ return -1;
+ }
+ if (!cma_is_registered_region((unsigned int)params->dst_rect.addr,
+ GET_RECT_SIZE(params->dst_rect))) {
+ printk(KERN_ERR "[%s] DST Surface is not included in CMA region\n", __func__);
+ return -1;
+ }
+#endif
params->src_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->src_rect.addr);
params->dst_rect.addr = (unsigned char *)phys_to_virt((unsigned long)params->dst_rect.addr);
pgd = (unsigned long)init_mm.pgd;
diff --git a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c
index 0c6c590..52160d6 100644
--- a/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c
+++ b/drivers/media/video/samsung/fimg2d4x-exynos4/fimg2d_ctx.c
@@ -18,10 +18,54 @@
#include "fimg2d_ctx.h"
#include "fimg2d_cache.h"
#include "fimg2d_helper.h"
+#if defined(CONFIG_CMA)
+#include <linux/cma.h>
+#endif
+
+static inline int is_yuvfmt(enum color_format fmt)
+{
+ switch (fmt) {
+ case CF_YCBCR_420:
+ case CF_YCBCR_422:
+ case CF_YCBCR_444:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * @plane: 0 for 1st plane, 1 for 2nd plane
+ */
+static int yuv_stride(int width, enum color_format cf, enum pixel_order order,
+ int plane)
+{
+ int bpp;
+
+ switch (cf) {
+ case CF_YCBCR_420:
+ bpp = (!plane) ? 8 : 4;
+ break;
+ case CF_YCBCR_422:
+ if (order == P2_CRCB || order == P2_CBCR)
+ bpp = 8;
+ else
+ bpp = (!plane) ? 16 : 0;
+ break;
+ case CF_YCBCR_444:
+ bpp = (!plane) ? 8 : 16;
+ break;
+ default:
+ bpp = 0;
+ break;
+ }
+
+ return width * bpp >> 3;
+}
static int fimg2d_check_params(struct fimg2d_bltcmd *cmd)
{
- int w, h, i;
+ int w, h, i, bw;
struct fimg2d_param *p = &cmd->param;
struct fimg2d_image *img;
struct fimg2d_scale *scl;
@@ -53,6 +97,36 @@ static int fimg2d_check_params(struct fimg2d_bltcmd *cmd)
r->x1 >= w || r->y1 >= h ||
r->x1 >= r->x2 || r->y1 >= r->y2)
return -1;
+#if defined(CONFIG_CMA)
+#if 0
+ if (img->addr.type == ADDR_PHYS) {
+ if (!cma_is_registered_region(img->addr.start, (h * img->stride))) {
+ printk(KERN_ERR "[%s] Surface[%d] is not included in CMA region\n", __func__, i);
+ return -1;
+ }
+ }
+#else
+ if (img->addr.type == ADDR_PHYS) {
+ if (is_yuvfmt(img->fmt))
+ bw = yuv_stride(img->width, img->fmt, img->order, 0);
+ else
+ bw = img->stride;
+
+ if (!cma_is_registered_region(img->addr.start, (h * bw))) {
+ printk(KERN_ERR "[%s] Surface[%d] is not included in CMA region\n", __func__, i);
+ return -1;
+ }
+
+ if (img->order == P2_CRCB || img->order == P2_CBCR) {
+ bw = yuv_stride(img->width, img->fmt, img->order, 1);
+ if (!cma_is_registered_region(img->plane2.start, (h * bw))) {
+ printk(KERN_ERR "[%s] plane2[%d] is not included in CMA region\n", __func__, i);
+ return -1;
+ }
+ }
+ }
+#endif
+#endif
}
clp = &p->clipping;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 82a6646..5f0d466 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -648,6 +648,42 @@ static int fuse_readpages_fill(void *_data, struct page *page)
return PTR_ERR(req);
}
}
+
+#ifdef CONFIG_DMA_CMA
+ if (is_cma_pageblock(page)) {
+ struct page *oldpage = page, *newpage;
+ int err;
+
+ /* make sure that old page is not free in-between the calls */
+ page_cache_get(oldpage);
+
+ newpage = alloc_page(GFP_HIGHUSER);
+ if (!newpage) {
+ page_cache_release(oldpage);
+ return -ENOMEM;
+ }
+
+ err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
+ if (err) {
+ __free_page(newpage);
+ page_cache_release(oldpage);
+ return err;
+ }
+
+ /*
+ * Decrement the count on new page to make page cache the only
+ * owner of it
+ */
+ lock_page(newpage);
+ put_page(newpage);
+
+ /* finally release the old page and swap pointers */
+ unlock_page(oldpage);
+ page_cache_release(oldpage);
+ page = newpage;
+ }
+#endif
+
page_cache_get(page);
req->pages[req->num_pages] = page;
req->num_pages++;
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 0287f4e..e1e70e93 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -490,4 +490,10 @@ void __init cma_early_regions_reserve(int (*reserve)(struct cma_region *reg));
#endif
+#ifdef CONFIG_CMA
+bool cma_is_registered_region(phys_addr_t start, size_t size);
+#else
+#define cma_is_registered_region(start, size) (false)
+#endif
+
#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index d6a3a55..ee879c7 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -185,6 +185,13 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
#ifdef CONFIG_DMA_CMA
static inline struct page *
+alloc_zeroed_user_highpage(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(0, vma, vaddr);
+}
+
+static inline struct page *
alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
unsigned long vaddr)
{
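For contrast, the existing movable helper that the new alloc_zeroed_user_highpage() mirrors passes __GFP_MOVABLE instead of 0 (mainline definition of this era, quoted for reference):

static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
                                   unsigned long vaddr)
{
        return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}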
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 95323a6..82b1faf 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -20,7 +20,11 @@ extern int migrate_pages(struct list_head *l, new_page_t x,
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
bool sync, int tries);
+
+extern int migrate_replace_cma_page(struct page *oldpage,
+ struct page **newpage);
#endif
+
extern int migrate_huge_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
bool sync);
@@ -48,7 +52,11 @@ static inline int migrate_pages(struct list_head *l, new_page_t x,
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
bool sync, int tries) { return -ENOSYS; }
+
+static inline int migrate_replace_cma_page(struct page *oldpage,
+ struct page **newpage) { return -ENOSYS; }
#endif
+
static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
bool sync) { return -ENOSYS; }
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2ce8e03..48f51d8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -154,6 +154,7 @@ extern pgprot_t protection_map[16];
#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT 0x10 /* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE 0x20 /* The fault task is in SIGKILL killable region */
+#define FAULT_FLAG_NO_CMA 0x80 /* don't use CMA pages */
/*
* This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -996,6 +997,16 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, int write, int force,
struct page **pages, struct vm_area_struct **vmas);
+
+#ifdef CONFIG_DMA_CMA
+int get_user_pages_nocma(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas);
+#else
+#define get_user_pages_nocma(tsk, mm, start, nr_pages, wr, force, pgs, vmas) \
+ get_user_pages(tsk, mm, start, nr_pages, wr, force, pgs, vmas)
+#endif
+
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
struct page *get_dump_page(unsigned long addr);
@@ -1560,6 +1571,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
#define FOLL_MLOCK 0x40 /* mark page as mlocked */
#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
+#define FOLL_NO_CMA 0x200 /* avoid putting pages to CMA regions */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
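A hypothetical driver-side use of the new helper, pinning user pages for DMA while guaranteeing none of them sit in a CMA pageblock (uaddr and nr are placeholders; error handling elided):

static int pin_user_buffer_nocma(unsigned long uaddr, int nr,
                                 struct page **pages)
{
        int got;

        down_read(&current->mm->mmap_sem);
        got = get_user_pages_nocma(current, current->mm, uaddr & PAGE_MASK,
                                   nr, 1 /* write */, 0 /* force */,
                                   pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* every pinned page now lies outside CMA pageblocks; drop each
         * reference with put_page() once the DMA has completed */
        return got;
}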
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7933b74..716875e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -205,11 +205,7 @@ extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
-#ifndef CONFIG_DMA_CMA
return alloc_pages(gfp, 0);
-#else
- return alloc_pages(gfp & ~__GFP_MOVABLE, 0);
-#endif
}
#endif
diff --git a/mm/cma.c b/mm/cma.c
index 546dd86..9d07750 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -280,6 +280,18 @@ static LIST_HEAD(cma_regions);
#define cma_foreach_region(reg) \
list_for_each_entry(reg, &cma_regions, list)
+bool cma_is_registered_region(phys_addr_t start, size_t size)
+{
+ struct cma_region *reg;
+
+ cma_foreach_region(reg) {
+ if ((start >= reg->start) &&
+ ((start + size) <= (reg->start + reg->size)))
+ return true;
+ }
+ return false;
+}
+
int __must_check cma_region_register(struct cma_region *reg)
{
const char *name, *alloc_name;
diff --git a/mm/memory.c b/mm/memory.c
index 326492a..159985d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -49,6 +49,7 @@
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
@@ -57,6 +58,7 @@
#include <linux/swapops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
+#include <linux/migrate.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
@@ -1592,6 +1594,25 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add
stack_guard_page_end(vma, addr+PAGE_SIZE);
}
+#ifdef CONFIG_DMA_CMA
+static inline int __replace_cma_page(struct page *page, struct page **res)
+{
+ struct page *newpage;
+ int ret;
+
+ ret = migrate_replace_cma_page(page, &newpage);
+ if (ret == 0) {
+ *res = newpage;
+ return 0;
+ }
+ /*
+ * Migration errors in case of get_user_pages() might not
+ * be fatal to CMA itself, so better don't fail here.
+ */
+ return 0;
+}
+#endif
+
/**
* __get_user_pages() - pin user pages in memory
* @tsk: task_struct of target task
@@ -1742,6 +1763,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
int ret;
unsigned int fault_flags = 0;
+#ifdef CONFIG_DMA_CMA
+ if (gup_flags & FOLL_NO_CMA)
+ fault_flags = FAULT_FLAG_NO_CMA;
+#endif
+
/* For mlock, just skip the stack guard page. */
if (foll_flags & FOLL_MLOCK) {
if (stack_guard_page(vma, start))
@@ -1807,6 +1833,16 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
+
+#ifdef CONFIG_DMA_CMA
+ if ((gup_flags & FOLL_NO_CMA)
+ && is_cma_pageblock(page)) {
+ int rc = __replace_cma_page(page, &page);
+ if (rc)
+ return i ? i : rc;
+ }
+#endif
+
if (pages) {
pages[i] = page;
@@ -1950,6 +1986,26 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);
+#ifdef CONFIG_DMA_CMA
+int get_user_pages_nocma(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int nr_pages, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ int flags = FOLL_TOUCH | FOLL_NO_CMA;
+
+ if (pages)
+ flags |= FOLL_GET;
+ if (write)
+ flags |= FOLL_WRITE;
+ if (force)
+ flags |= FOLL_FORCE;
+
+ return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+ NULL);
+}
+EXPORT_SYMBOL(get_user_pages_nocma);
+#endif
+
/**
* get_dump_page() - pin user page in memory while writing it to core dump
* @addr: user address
@@ -2485,7 +2541,11 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
*/
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *page_table, pmd_t *pmd,
+#ifdef CONFIG_DMA_CMA
+ spinlock_t *ptl, pte_t orig_pte, unsigned int flags)
+#else
spinlock_t *ptl, pte_t orig_pte)
+#endif
__releases(ptl)
{
struct page *old_page, *new_page;
@@ -2657,11 +2717,25 @@ gotten:
goto oom;
if (is_zero_pfn(pte_pfn(orig_pte))) {
- new_page = alloc_zeroed_user_highpage_movable(vma, address);
+#ifdef CONFIG_DMA_CMA
+ if (flags & FAULT_FLAG_NO_CMA)
+ new_page = alloc_zeroed_user_highpage(vma, address);
+ else
+#endif
+ new_page =
+ alloc_zeroed_user_highpage_movable(vma, address);
+
if (!new_page)
goto oom;
} else {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+#ifdef CONFIG_DMA_CMA
+ if (flags & FAULT_FLAG_NO_CMA)
+ new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
+ else
+#endif
+ new_page =
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma);
@@ -2888,6 +2962,16 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
entry = pte_to_swp_entry(orig_pte);
if (unlikely(non_swap_entry(entry))) {
if (is_migration_entry(entry)) {
+#ifdef CONFIG_DMA_CMA
+ /*
+ * FIXME: mszyprow: cruel, brute-force method for
+ * letting cma/migration finish its job without
+ * stealing the lock from migration_entry_wait() and creating
+ * a live-lock on the faulted page
+ * (page->_count == 2 migration failure issue)
+ */
+ mdelay(10);
+#endif
migration_entry_wait(mm, pmd, address);
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
@@ -3021,7 +3105,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
if (flags & FAULT_FLAG_WRITE) {
+#ifdef CONFIG_DMA_CMA
+ ret |= do_wp_page(mm, vma, address, page_table,
+ pmd, ptl, pte, flags);
+#else
ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
+#endif
if (ret & VM_FAULT_ERROR)
ret &= VM_FAULT_ERROR;
goto out;
@@ -3213,8 +3302,16 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM;
goto out;
}
- page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
- vma, address);
+
+#ifdef CONFIG_DMA_CMA
+ if (flags & FAULT_FLAG_NO_CMA)
+ page = alloc_page_vma(GFP_HIGHUSER,
+ vma, address);
+ else
+#endif
+ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+ vma, address);
+
if (!page) {
ret = VM_FAULT_OOM;
goto out;
@@ -3422,8 +3519,13 @@ int handle_pte_fault(struct mm_struct *mm,
goto unlock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry))
+#ifdef CONFIG_DMA_CMA
+ return do_wp_page(mm, vma, address,
+ pte, pmd, ptl, entry, flags);
+#else
return do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
+#endif
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
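Note that the #ifdef'd do_wp_page() prototype forces a matching #ifdef at every call site (three in this patch alone). A hypothetical, less intrusive shape would thread the flags argument unconditionally; non-CMA configurations simply never set FAULT_FLAG_NO_CMA:

static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
                spinlock_t *ptl, pte_t orig_pte, unsigned int flags)
        __releases(ptl);

/* callers then read the same on every config:
 *      ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte, flags);
 */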
diff --git a/mm/migrate-cma.c b/mm/migrate-cma.c
index 22cd0a3..c224481 100644
--- a/mm/migrate-cma.c
+++ b/mm/migrate-cma.c
@@ -1136,6 +1136,76 @@ out:
return nr_failed + retry;
}
+static struct page *
+__migrate_replace_alloc(struct page *page, unsigned long private,
+ int **resultp)
+{
+ struct page **newpage = (struct page **)private;
+ *newpage = alloc_page(GFP_USER);
+
+ return *newpage;
+}
+
+/*
+ * migrate_replace_cma_page
+ *
+ * The function takes one single page (oldpage) and a target page
+ * (newpage) and tries to migrate data to the target page. The caller
+ * must ensure that the source page is locked with one additional
+ * get_page() call, which will be freed during the migration.
+ *
+ * Return: error code or 0 on success.
+ */
+int migrate_replace_cma_page(struct page *oldpage, struct page **newpage)
+{
+ /* This function is based on compact_zone() from compaction.c. */
+ unsigned long pfn = page_to_pfn(oldpage);
+ int ret;
+ struct compact_control cc = {
+ .nr_migratepages = 0,
+ .order = 0,
+ .zone = page_zone(oldpage),
+ .sync = true,
+ };
+ INIT_LIST_HEAD(&cc.migratepages);
+
+ migrate_prep_local();
+
+ pfn = isolate_migratepages_range(cc.zone, &cc, pfn, pfn+1);
+ if (!pfn || list_empty(&cc.migratepages))
+ goto putback;
+
+ /*
+ * Put the additional reference to the old page, now migration code
+ * owns it
+ */
+ put_page(oldpage);
+
+ ret = migrate_pages(&cc.migratepages, __migrate_replace_alloc,
+ (unsigned long)newpage, false, true, 0);
+
+ if (ret == 0) {
+ /*
+ * Do the same as follow_page() did with oldpage and
+ * return
+ */
+ get_page_foll(*newpage);
+ return 0;
+ }
+
+ if (is_failed_page(oldpage, 0, 0))
+ dump_page(oldpage);
+
+ /*
+ * Restore additional reference to the old page before giving it back
+ * to lru
+ */
+ get_page(oldpage);
+putback:
+ putback_lru_pages(&cc.migratepages);
+ return -EBUSY;
+}
+
#ifdef CONFIG_NUMA
/*
* Move a list of individual pages