aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/exynos
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/exynos')
-rw-r--r--drivers/gpu/drm/exynos/Kconfig29
-rw-r--r--drivers/gpu/drm/exynos/Makefile6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c60
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c296
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h31
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c26
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c180
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h39
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c125
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c81
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1925
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.h33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c1543
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c251
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c547
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1385
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.h33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c212
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h47
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c1638
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h191
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c246
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c751
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ump.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c208
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c10
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h669
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h295
35 files changed, 8088 insertions, 2889 deletions
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 3d002cc..b4ae4c5 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -15,6 +15,12 @@ config DRM_EXYNOS_MEMSIZE
depends on DRM_EXYNOS
default "32768"
+config DRM_EXYNOS_IOMMU
+ bool "GEM IOMMU Support"
+ depends on DRM_EXYNOS
+ help
+ Choose this option if you want to use GEM IOMMU feature for DRM.
+
config DRM_EXYNOS_DMABUF
bool "GEM DMABUF Support"
depends on DRM_EXYNOS
@@ -54,8 +60,29 @@ config DRM_EXYNOS_G2D
help
Choose this option if you want to use Samsung G2D for DRM.
+config DRM_EXYNOS_IPP
+ bool "Samsung DRM IPP"
+ depends on DRM_EXYNOS
+ help
+ This option enables IPP drivers for FIMC, GSC, Rotator.
+ It can support user space ioctls for each device.
+ When this option is enabled, ipp operations support rotating,
+ scaling, writeback, flip and output.
+
config DRM_EXYNOS_ROTATOR
bool "Samsung DRM Rotator"
- depends on DRM_EXYNOS
+ depends on DRM_EXYNOS_IPP
help
Choose this option if you want to use Samsung Rotator for DRM.
+
+config DRM_EXYNOS_FIMC
+ bool "Samsung DRM FIMC"
+ depends on DRM_EXYNOS_IPP
+ help
+ Choose this option if you want to use Samsung FIMC for DRM.
+
+config DRM_EXYNOS_GSC
+ bool "Samsung DRM GSC"
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ help
+ Choose this option if you want to use Samsung GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index f8a88f3..369b3a5 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -7,14 +7,18 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos \
exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
exynos_drm_buf.o exynos_drm_ump.o exynos_drm_gem.o \
- exynos_drm_core.o exynos_drm_plane.o exynos_drm_iommu.o
+ exynos_drm_core.o exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
exynos_hdmiphy.o exynos_drm_hdmi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index f7bb7be..a0b27d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -47,7 +47,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
return -EINVAL;
}
- if (buf->dma_addr) {
+ if (buf->paddr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
@@ -78,21 +78,21 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
}
#ifdef CONFIG_CMA
- buf->dma_addr = cma_alloc(dev->dev, "drm", buf->size,
+ buf->paddr = cma_alloc(dev->dev, "drm", buf->size,
buf->page_size);
- if (IS_ERR((void *)buf->dma_addr)) {
+ if (IS_ERR((void *)buf->paddr)) {
DRM_DEBUG_KMS("cma_alloc of size %ld failed\n",
buf->size);
ret = -ENOMEM;
goto err1;
}
- buf->kvaddr = phys_to_virt(buf->dma_addr);
+ buf->kvaddr = phys_to_virt(buf->paddr);
#else
/* align it as page size(page or section) TODO */
buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
+ &buf->paddr, GFP_KERNEL);
if (!buf->kvaddr) {
DRM_ERROR("failed to allocate buffer.\n");
ret = -ENOMEM;
@@ -107,7 +107,7 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
}
sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
+ start_addr = buf->paddr;
while (i < npages) {
buf->pages[i] = phys_to_page(start_addr);
@@ -118,20 +118,20 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
i++;
}
- DRM_INFO("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+ DRM_INFO("vaddr(0x%lx), paddr(0x%lx), size(0x%lx)\n",
(unsigned long)buf->kvaddr,
- (unsigned long)buf->dma_addr,
+ (unsigned long)buf->paddr,
buf->size);
return ret;
err2:
#ifdef CONFIG_CMA
- cma_free(buf->dma_addr);
+ cma_free(buf->paddr);
#else
dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+ (dma_addr_t)buf->paddr);
#endif
- buf->dma_addr = (dma_addr_t)NULL;
+ buf->paddr = (dma_addr_t)NULL;
err1:
sg_free_table(buf->sgt);
kfree(buf->sgt);
@@ -146,13 +146,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
DRM_DEBUG_KMS("%s.\n", __FILE__);
/*
- * now buffer is being shared and it would be released
- * by original owner so ignor free action.
- */
- if (buf->shared || atomic_read(&buf->shared_refcount))
- return;
-
- /*
* release only physically continuous memory and
* non-continuous memory would be released by exynos
* gem framework.
@@ -162,25 +155,36 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
return;
}
- if (!buf->dma_addr) {
- DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ if (!buf->paddr) {
+ DRM_DEBUG_KMS("paddr is invalid.\n");
return;
}
- sg_free_table(buf->sgt);
-
- kfree(buf->sgt);
- buf->sgt = NULL;
+ if (buf->sgt) {
+ sg_free_table(buf->sgt);
+ kfree(buf->sgt);
+ buf->sgt = NULL;
+ }
kfree(buf->pages);
buf->pages = NULL;
+
+ /*
+ * now buffer is being shared and it would be released
+ * by original owner so ignore free action.
+ * this buffer was imported from physical memory to gem directly
+ * and this feature is used temporarily so it will be removed later.
+ */
+ if (buf->shared)
+ return;
+
#ifdef CONFIG_CMA
- cma_free(buf->dma_addr);
+ cma_free(buf->paddr);
#else
dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+ (dma_addr_t)buf->paddr);
#endif
- buf->dma_addr = (dma_addr_t)NULL;
+ buf->paddr = (dma_addr_t)NULL;
}
struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
@@ -221,7 +225,7 @@ int exynos_drm_alloc_buf(struct drm_device *dev,
/*
* allocate memory region and set the memory information
- * to vaddr and dma_addr of a buffer object.
+ * to vaddr and paddr of a buffer object.
*/
if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
return -ENOMEM;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index eaf630d..80cba2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -33,7 +33,6 @@
#include "exynos_drm_fbdev.h"
static LIST_HEAD(exynos_drm_subdrv_list);
-static struct drm_device *drm_dev;
static int exynos_drm_subdrv_probe(struct drm_device *dev,
struct exynos_drm_subdrv *subdrv)
@@ -95,7 +94,7 @@ static void exynos_drm_subdrv_remove(struct drm_device *dev,
DRM_DEBUG_DRIVER("%s\n", __FILE__);
if (subdrv->remove)
- subdrv->remove(dev);
+ subdrv->remove(dev, subdrv->dev);
if (subdrv->encoder) {
struct drm_encoder *encoder = subdrv->encoder;
@@ -120,8 +119,6 @@ int exynos_drm_device_register(struct drm_device *dev)
if (!dev)
return -EINVAL;
- drm_dev = dev;
-
list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
subdrv->drm_dev = dev;
err = exynos_drm_subdrv_probe(dev, subdrv);
@@ -149,8 +146,6 @@ int exynos_drm_device_unregister(struct drm_device *dev)
list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
exynos_drm_subdrv_remove(dev, subdrv);
- drm_dev = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 4afb625..5904c58 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,21 +29,23 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
-#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
-#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
+enum exynos_crtc_mode {
+ CRTC_MODE_NORMAL, /* normal mode */
+ CRTC_MODE_BLANK, /* The private plane of crtc is blank */
+};
+
/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
- * @overlay: contain information common to display controller and hdmi and
- * contents of this overlay object would be copied to sub driver size.
+ * @drm_plane: pointer of private plane object for this crtc
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
* to get a crtc object corresponding to this pipe from private->crtc
@@ -52,115 +54,16 @@
* we can refer to the crtc to current hardware interrupt occured through
* this pipe value.
* @dpms: store the crtc dpms value
+ * @mode: store the crtc mode value
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
- struct exynos_drm_overlay overlay;
+ struct drm_plane *plane;
unsigned int pipe;
unsigned int dpms;
+ enum exynos_crtc_mode mode;
};
-static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
- struct exynos_drm_overlay *overlay = &exynos_crtc->overlay;
-
- exynos_drm_fn_encoder(crtc, overlay,
- exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
- exynos_drm_encoder_crtc_commit);
-}
-
-int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos)
-{
- struct exynos_drm_gem_buf *buffer;
- unsigned int actual_w;
- unsigned int actual_h;
- int nr = exynos_drm_format_num_buffers(fb->pixel_format);
- int i;
-
- for (i = 0; i < nr; i++) {
- buffer = exynos_drm_fb_buffer(fb, i);
- if (!buffer) {
- DRM_LOG_KMS("buffer is null\n");
- return -EFAULT;
- }
-
- overlay->dma_addr[i] = buffer->dma_addr;
- overlay->vaddr[i] = buffer->kvaddr;
-
- DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
- i, (unsigned long)overlay->vaddr[i],
- (unsigned long)overlay->dma_addr[i]);
- }
-
- actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
- actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
-
- /* set drm framebuffer data. */
- overlay->fb_x = pos->fb_x;
- overlay->fb_y = pos->fb_y;
- overlay->fb_width = fb->width;
- overlay->fb_height = fb->height;
- overlay->src_width = pos->src_w;
- overlay->src_height = pos->src_h;
- overlay->bpp = fb->bits_per_pixel;
- overlay->pitch = fb->pitches[0];
- overlay->pixel_format = fb->pixel_format;
-
- /* set overlay range to be displayed. */
- overlay->crtc_x = pos->crtc_x;
- overlay->crtc_y = pos->crtc_y;
- overlay->crtc_width = actual_w;
- overlay->crtc_height = actual_h;
-
- /* set drm mode data. */
- overlay->mode_width = mode->hdisplay;
- overlay->mode_height = mode->vdisplay;
- overlay->refresh = mode->vrefresh;
- overlay->scan_flag = mode->flags;
-
- DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
- overlay->crtc_x, overlay->crtc_y,
- overlay->crtc_width, overlay->crtc_height);
-
- return 0;
-}
-
-static int exynos_drm_crtc_update(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc;
- struct exynos_drm_overlay *overlay;
- struct exynos_drm_crtc_pos pos;
- struct drm_display_mode *mode = &crtc->mode;
- struct drm_framebuffer *fb = crtc->fb;
-
- if (!mode || !fb)
- return -EINVAL;
-
- exynos_crtc = to_exynos_crtc(crtc);
- overlay = &exynos_crtc->overlay;
-
- memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
-
- /* it means the offset of framebuffer to be displayed. */
- pos.fb_x = crtc->x;
- pos.fb_y = crtc->y;
-
- /* OSD position to be displayed. */
- pos.crtc_x = 0;
- pos.crtc_y = 0;
- pos.crtc_w = fb->width - crtc->x;
- pos.crtc_h = fb->height - crtc->y;
- pos.src_w = pos.crtc_w;
- pos.src_h = pos.crtc_h;
-
- return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
-}
-
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
@@ -175,23 +78,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
mutex_lock(&dev->struct_mutex);
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- exynos_drm_fn_encoder(crtc, &mode,
- exynos_drm_encoder_crtc_dpms);
- exynos_crtc->dpms = mode;
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- exynos_drm_fn_encoder(crtc, &mode,
- exynos_drm_encoder_crtc_dpms);
- exynos_crtc->dpms = mode;
- break;
- default:
- DRM_ERROR("unspecified mode %d\n", mode);
- break;
- }
+ exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
mutex_unlock(&dev->struct_mutex);
}
@@ -209,30 +97,8 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%s\n", __FILE__);
- /*
- * when set_crtc is requested from user or at booting time,
- * crtc->commit would be called without dpms call so if dpms is
- * no power on then crtc->dpms should be called
- * with DRM_MODE_DPMS_ON for the hardware power to be on.
- */
- if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
- int mode = DRM_MODE_DPMS_ON;
-
- /*
- * enable hardware(power on) to all encoders hdmi connected
- * to current crtc.
- */
- exynos_drm_crtc_dpms(crtc, mode);
- /*
- * enable dma to all encoders connected to current crtc and
- * lcd panel.
- */
- exynos_drm_fn_encoder(crtc, &mode,
- exynos_drm_encoder_dpms_from_crtc);
- }
-
- exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
- exynos_drm_encoder_crtc_commit);
+ exynos_plane_commit(exynos_crtc->plane);
+ exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON);
}
static bool
@@ -251,31 +117,61 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane = exynos_crtc->plane;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
+ int pipe = exynos_crtc->pipe;
+ int ret;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+
/*
* copy the mode data adjusted by mode_fixup() into crtc->mode
* so that hardware can be seet to proper mode.
*/
memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
- return exynos_drm_crtc_update(crtc);
+ crtc_w = crtc->fb->width - x;
+ crtc_h = crtc->fb->height - y;
+
+ ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+ x, y, crtc_w, crtc_h);
+ if (ret)
+ return ret;
+
+ plane->crtc = crtc;
+ plane->fb = crtc->fb;
+
+ exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_pipe);
+
+ return 0;
}
static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_plane *plane = exynos_crtc->plane;
+ unsigned int crtc_w;
+ unsigned int crtc_h;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- ret = exynos_drm_crtc_update(crtc);
+ crtc_w = crtc->fb->width - x;
+ crtc_h = crtc->fb->height - y;
+
+ ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h,
+ x, y, crtc_w, crtc_h);
if (ret)
return ret;
- exynos_drm_crtc_apply(crtc);
+ exynos_drm_crtc_commit(crtc);
- return ret;
+ return 0;
}
static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
@@ -284,6 +180,16 @@ static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
/* drm framework doesn't check NULL */
}
+static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF);
+ exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.dpms = exynos_drm_crtc_dpms,
.prepare = exynos_drm_crtc_prepare,
@@ -292,6 +198,7 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.mode_set = exynos_drm_crtc_mode_set,
.mode_set_base = exynos_drm_crtc_mode_set_base,
.load_lut = exynos_drm_crtc_load_lut,
+ .disable = exynos_drm_crtc_disable,
};
static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
@@ -327,7 +234,8 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
&dev_priv->pageflip_event_list);
crtc->fb = fb;
- ret = exynos_drm_crtc_update(crtc);
+ ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
+ NULL);
if (ret) {
crtc->fb = old_fb;
drm_vblank_put(dev, exynos_crtc->pipe);
@@ -335,14 +243,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
-
- /*
- * the values related to a buffer of the drm framebuffer
- * to be applied should be set at here. because these values
- * first, are set to shadow registers and then to
- * real registers at vsync front porch period.
- */
- exynos_drm_crtc_apply(crtc);
}
out:
mutex_unlock(&dev->struct_mutex);
@@ -362,18 +262,73 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(exynos_crtc);
}
+static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (property == dev_priv->crtc_mode_property) {
+ enum exynos_crtc_mode mode = val;
+
+ if (mode == exynos_crtc->mode)
+ return 0;
+
+ exynos_crtc->mode = mode;
+
+ switch (mode) {
+ case CRTC_MODE_NORMAL:
+ exynos_drm_crtc_commit(crtc);
+ break;
+ case CRTC_MODE_BLANK:
+ exynos_plane_dpms(exynos_crtc->plane,
+ DRM_MODE_DPMS_OFF);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
static struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = exynos_drm_crtc_page_flip,
.destroy = exynos_drm_crtc_destroy,
+ .set_property = exynos_drm_crtc_set_property,
+};
+
+static const struct drm_prop_enum_list mode_names[] = {
+ { CRTC_MODE_NORMAL, "normal" },
+ { CRTC_MODE_BLANK, "blank" },
};
-struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
- struct drm_crtc *crtc)
+static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
- return &exynos_crtc->overlay;
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ prop = dev_priv->crtc_mode_property;
+ if (!prop) {
+ prop = drm_property_create_enum(dev, 0, "mode", mode_names,
+ ARRAY_SIZE(mode_names));
+ if (!prop)
+ return;
+
+ dev_priv->crtc_mode_property = prop;
+ }
+
+ drm_object_attach_property(&crtc->base, prop, 0);
}
int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
@@ -391,8 +346,13 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
}
exynos_crtc->pipe = nr;
- exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
- exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
+ exynos_crtc->dpms = DRM_MODE_DPMS_ON;
+ exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true);
+ if (!exynos_crtc->plane) {
+ kfree(exynos_crtc);
+ return -ENOMEM;
+ }
+
crtc = &exynos_crtc->drm_crtc;
private->crtc[nr] = crtc;
@@ -400,6 +360,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
+ exynos_drm_crtc_attach_mode_property(crtc);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 16b8e21..6bae8d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -29,39 +29,8 @@
#ifndef _EXYNOS_DRM_CRTC_H_
#define _EXYNOS_DRM_CRTC_H_
-struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
- struct drm_crtc *crtc);
int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
-/*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- * - the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- * - the unit is screen coordinates.
- * @src_w: width of source area to be displayed from a framebuffer.
- * @src_h: height of source area to be displayed from a framebuffer.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
- unsigned int fb_x;
- unsigned int fb_y;
- unsigned int src_w;
- unsigned int src_h;
- unsigned int crtc_x;
- unsigned int crtc_y;
- unsigned int crtc_w;
- unsigned int crtc_h;
-};
-
-int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
- struct drm_framebuffer *fb,
- struct drm_display_mode *mode,
- struct exynos_drm_crtc_pos *pos);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index ca40c68..128627c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -25,6 +25,7 @@
#include "drmP.h"
#include "drm.h"
+#include "exynos_drm.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
@@ -235,6 +236,9 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
if (sgt->nents == 1) {
buffer->dma_addr = sg_dma_address(sgt->sgl);
buffer->size = sg_dma_len(sgt->sgl);
+
+ /* always physically continuous memory if sgt->nents is 1. */
+ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
unsigned int i = 0;
@@ -245,28 +249,24 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
sgl = sg_next(sgl);
i++;
}
+
+ /*
+ * this case could be CONTIG or NONCONTIG type but for now NONCONTIG.
+ * we have to find a way that exporter can notify the type of
+ * its own buffer to importer. TODO
+ */
+ exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
exynos_gem_obj->buffer = buffer;
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach;
- /* register buffer information to private buffer manager. */
- ret = register_buf_to_priv_mgr(exynos_gem_obj,
- &exynos_gem_obj->priv_handle,
- &exynos_gem_obj->priv_id);
- if (ret < 0)
- goto err_release_gem;
-
- DRM_DEBUG_PRIME("ump id = %d, dma_addr = 0x%x, size = 0x%lx\n",
- exynos_gem_obj->priv_id, buffer->dma_addr, buffer->size);
+ DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+ buffer->size);
return &exynos_gem_obj->base;
-err_release_gem:
- drm_gem_object_release(&exynos_gem_obj->base);
- kfree(exynos_gem_obj);
- exynos_gem_obj = NULL;
err_free_pages:
kfree(buffer->pages);
buffer->pages = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 340a8d6..90cff3f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -38,12 +38,13 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_rotator.h"
+#include "exynos_drm_ipp.h"
#include "exynos_drm_plane.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
+#include "exynos_drm_iommu.h"
-#define DRIVER_NAME "exynos-drm"
+#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20110530"
#define DRIVER_MAJOR 1
@@ -51,53 +52,65 @@
#define VBLANK_OFF_DELAY 50000
-static int exynos_drm_list_gem_info(int id, void *ptr, void *data)
+struct exynos_drm_gem_info_data {
+ struct drm_file *filp;
+ struct seq_file *m;
+};
+
+static int exynos_drm_gem_one_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
- struct drm_file *filp = data;
- struct exynos_drm_gem_obj *gem = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = gem->buffer;
-
- DRM_INFO("%3d \t%3d \t%2d \t\t%2d \t0x%lx \t0x%x \t0x%lx "\
- "\t%2d \t\t%2d \t\t%2d\n",
- filp->pid,
- id,
- atomic_read(&obj->refcount.refcount),
- atomic_read(&obj->handle_count),
- gem->size,
- gem->flags,
- buf->page_size,
- buf->pfnmap,
- obj->export_dma_buf ? 1 : 0,
- obj->import_attach ? 1 : 0);
+ struct exynos_drm_gem_info_data *gem_info_data = data;
+ struct drm_exynos_file_private *file_priv =
+ gem_info_data->filp->driver_priv;
+ struct exynos_drm_gem_obj *exynos_gem = to_exynos_gem_obj(obj);
+ struct exynos_drm_gem_buf *buf = exynos_gem->buffer;
+
+ seq_printf(gem_info_data->m, "%3d \t%3d \t%3d \t%2d \t\t%2d \t0x%08lx"\
+ " \t0x%x \t0x%08lx \t%2d \t\t%2d \t\t%2d\n",
+ gem_info_data->filp->pid,
+ file_priv->tgid,
+ id,
+ atomic_read(&obj->refcount.refcount),
+ atomic_read(&obj->handle_count),
+ exynos_gem->size,
+ exynos_gem->flags,
+ buf->page_size,
+ buf->pfnmap,
+ obj->export_dma_buf ? 1 : 0,
+ obj->import_attach ? 1 : 0);
return 0;
}
-static ssize_t exynos_drm_show_gem_info(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int exynos_drm_gem_info(struct seq_file *m, void *data)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_file *filp;
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm_dev = node->minor->dev;
+ struct exynos_drm_gem_info_data gem_info_data;
+
+ gem_info_data.m = m;
- DRM_INFO("pid \thandle \trefcount \thcount \tsize \t\tflags "\
- "\tpage_size \tpfnmap \texport_to_fd \timport_from_fd\n");
+ seq_printf(gem_info_data.m, "pid \ttgid \thandle \trefcount \thcount "\
+ "\tsize \t\tflags \tpage_size \tpfnmap \t"\
+ "exyport_to_fd \timport_from_fd\n");
- list_for_each_entry(filp, &drm_dev->filelist, lhead)
- idr_for_each(&filp->object_idr, &exynos_drm_list_gem_info,
- filp);
+ list_for_each_entry(gem_info_data.filp, &drm_dev->filelist, lhead)
+ idr_for_each(&gem_info_data.filp->object_idr,
+ exynos_drm_gem_one_info, &gem_info_data);
- return strlen(buf);
+ return 0;
}
-static const struct device_attribute exynos_device_attrs[] = {
- __ATTR(gem_info, S_IRUGO, exynos_drm_show_gem_info, NULL)
+static struct drm_info_list exynos_drm_debugfs_list[] = {
+ {"gem_info", exynos_drm_gem_info, DRIVER_GEM},
};
+#define EXYNOS_DRM_DEBUGFS_ENTRIES ARRAY_SIZE(exynos_drm_debugfs_list)
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
+ struct drm_minor *minor;
int ret;
int nr;
@@ -109,6 +122,17 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
return -ENOMEM;
}
+ /* maximum size of userptr is limited to 16MB as default. */
+ private->userptr_limit = SZ_16M;
+
+ /* setup device address space for iommu. */
+ private->vmm = exynos_drm_iommu_setup(0x80000000, 0x40000000);
+ if (IS_ERR(private->vmm)) {
+ DRM_ERROR("failed to setup iommu.\n");
+ kfree(private);
+ return PTR_ERR(private->vmm);
+ }
+
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
@@ -130,8 +154,11 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
}
for (nr = 0; nr < MAX_PLANE; nr++) {
- ret = exynos_plane_init(dev, nr);
- if (ret)
+ struct drm_plane *plane;
+ unsigned int possible_crtcs = (1 << MAX_CRTC) - 1;
+
+ plane = exynos_plane_init(dev, possible_crtcs, false);
+ if (!plane)
goto err_crtc;
}
@@ -163,9 +190,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
drm_vblank_offdelay = VBLANK_OFF_DELAY;
- ret = device_create_file(dev->dev, &exynos_device_attrs[0]);
- if (ret < 0)
- DRM_DEBUG_DRIVER("failed to create sysfs.\n");
+ minor = dev->primary;
+ ret = drm_debugfs_create_files(exynos_drm_debugfs_list,
+ EXYNOS_DRM_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ if (ret)
+ DRM_DEBUG_DRIVER("failed to create exynos-drm debugfs.\n");
return 0;
@@ -182,8 +212,15 @@ err_crtc:
static int exynos_drm_unload(struct drm_device *dev)
{
+ struct exynos_drm_private *private;
+
+ private = dev->dev_private;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ /* release vmm object and device address space for iommu. */
+ exynos_drm_iommu_cleanup(private->vmm);
+
exynos_drm_fbdev_fini(dev);
exynos_drm_device_unregister(dev);
drm_vblank_cleanup(dev);
@@ -193,6 +230,9 @@ static int exynos_drm_unload(struct drm_device *dev)
dev->dev_private = NULL;
+ drm_debugfs_remove_files(exynos_drm_debugfs_list,
+ EXYNOS_DRM_DEBUGFS_ENTRIES, dev->primary);
+
return 0;
}
@@ -206,6 +246,8 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
if (!file_priv)
return -ENOMEM;
+ file_priv->tgid = task_tgid_nr(current);
+
drm_prime_init_file_private(&file->prime);
file->driver_priv = file_priv;
@@ -271,6 +313,9 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_drm_gem_userptr_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(EXYNOS_USER_LIMIT,
+ exynos_drm_gem_user_limit_ioctl, DRM_MASTER |
+ DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_EXPORT_UMP,
exynos_drm_gem_export_ump_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CACHE_OP,
@@ -280,8 +325,6 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
exynos_drm_gem_get_phy_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_PHY_IMP,
exynos_drm_gem_phy_imp_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS,
- exynos_plane_set_zpos_ioctl, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
@@ -292,8 +335,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
- DRM_IOCTL_DEF_DRV(EXYNOS_ROTATOR_EXEC,
- exynos_drm_rotator_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
+ exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
+ exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_BUF,
+ exynos_drm_ipp_buf, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CTRL,
+ exynos_drm_ipp_ctrl, DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -307,8 +356,8 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.open = exynos_drm_open,
@@ -407,6 +456,24 @@ static int __init exynos_drm_init(void)
goto out_rotator;
#endif
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ ret = platform_driver_register(&fimc_driver);
+ if (ret < 0)
+ goto out_fimc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ ret = platform_driver_register(&gsc_driver);
+ if (ret < 0)
+ goto out_gsc;
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ ret = platform_driver_register(&ipp_driver);
+ if (ret < 0)
+ goto out_ipp;
+#endif
+
ret = platform_driver_register(&exynos_drm_platform_driver);
if (ret < 0)
goto out;
@@ -414,6 +481,21 @@ static int __init exynos_drm_init(void)
return 0;
out:
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+out_ipp:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+out_gsc:
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+out_fimc:
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
platform_driver_unregister(&rotator_driver);
out_rotator:
@@ -451,6 +533,18 @@ static void __exit exynos_drm_exit(void)
platform_driver_unregister(&exynos_drm_platform_driver);
+#ifdef CONFIG_DRM_EXYNOS_IPP
+ platform_driver_unregister(&ipp_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_GSC
+ platform_driver_unregister(&gsc_driver);
+#endif
+
+#ifdef CONFIG_DRM_EXYNOS_FIMC
+ platform_driver_unregister(&fimc_driver);
+#endif
+
#ifdef CONFIG_DRM_EXYNOS_ROTATOR
platform_driver_unregister(&rotator_driver);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 463b086..8cdf93b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -59,12 +59,14 @@ enum exynos_drm_output_type {
*
* @mode_set: copy drm overlay info to hw specific overlay info.
* @commit: apply hardware specific overlay data to registers.
+ * @enable: enable hardware specific overlay.
* @disable: disable hardware specific overlay.
*/
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
struct exynos_drm_overlay *overlay);
void (*commit)(struct device *subdrv_dev, int zpos);
+ void (*enable)(struct device *subdrv_dev, int zpos);
void (*disable)(struct device *subdrv_dev, int zpos);
};
@@ -220,32 +222,30 @@ struct iommu_info_node {
* Exynos drm g2d private structure
*
* @dev: device object to device driver for using iommu.
- * @iommu_map_list: list head to iommu map information.
- * each device driver using iommu should have its own iommu_map_list
- * because device drivers have their own device address space and
- * the device address spaces could be duplicated echo other.
*/
struct exynos_drm_g2d_private {
struct device *dev;
struct list_head inuse_cmdlist;
struct list_head event_list;
- struct list_head iommu_map_list;
};
/*
- * Exynos drm rotator private structure
+ * Exynos drm ipp private structure
*
* @dev: device object to device driver for using driver data.
- * @iommu_list: list head to iommu map information.
+ * @ippdrv: link used ippdrv.
+ * @event_list: list head to event.
*/
-struct exynos_drm_rot_private {
- struct device *dev;
- struct list_head iommu_list;
+struct exynos_drm_ipp_private {
+ struct device *dev;
+ void *ippdrv;
+ struct list_head event_list;
};
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
- struct exynos_drm_rot_private *rot_priv;
+ struct exynos_drm_ipp_private *ipp_priv;
+ pid_t tgid;
};
/*
@@ -262,6 +262,18 @@ struct exynos_drm_private {
* this array is used to be aware of which crtc did it request vblank.
*/
struct drm_crtc *crtc[MAX_CRTC];
+
+ /*
+ * maximum size of allocation by userptr feature.
+ * - as default, this has 16MB and only root user can change it.
+ */
+ unsigned long userptr_limit;
+
+ /* a iovmm object for iommu support. */
+ void *vmm;
+
+ struct drm_property *plane_zpos_property;
+ struct drm_property *crtc_mode_property;
};
/*
@@ -289,7 +301,7 @@ struct exynos_drm_subdrv {
struct exynos_drm_manager *manager;
int (*probe)(struct drm_device *drm_dev, struct device *dev);
- void (*remove)(struct drm_device *dev);
+ void (*remove)(struct drm_device *drm_dev, struct device *dev);
int (*open)(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file);
void (*close)(struct drm_device *drm_dev, struct device *dev,
@@ -332,4 +344,7 @@ extern struct platform_driver exynos_drm_common_hdmi_driver;
extern struct platform_driver vidi_driver;
extern struct platform_driver g2d_driver;
extern struct platform_driver rotator_driver;
+extern struct platform_driver fimc_driver;
+extern struct platform_driver gsc_driver;
+extern struct platform_driver ipp_driver;
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 6e9ac7b..ebc4456 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -30,7 +30,6 @@
#include "drm_crtc_helper.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"
#define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\
@@ -136,21 +135,16 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev,
- encoder->crtc);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
+ if (connector->encoder == encoder)
if (manager_ops && manager_ops->mode_set)
manager_ops->mode_set(manager->dev,
adjusted_mode);
-
- if (overlay_ops && overlay_ops->mode_set)
- overlay_ops->mode_set(manager->dev, overlay);
- }
}
}
@@ -261,7 +255,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
return NULL;
}
- exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
+ exynos_encoder->dpms = DRM_MODE_DPMS_ON;
exynos_encoder->manager = manager;
encoder = &exynos_encoder->drm_encoder;
encoder->possible_crtcs = possible_crtcs;
@@ -317,8 +311,8 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager_ops *manager_ops = manager->ops;
int crtc = *(int *)data;
- if (manager->pipe == -1)
- manager->pipe = crtc;
+ if (manager->pipe != crtc)
+ return;
if (manager_ops->enable_vblank)
manager_ops->enable_vblank(manager->dev);
@@ -331,34 +325,41 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager_ops *manager_ops = manager->ops;
int crtc = *(int *)data;
- if (manager->pipe == -1)
- manager->pipe = crtc;
+ if (manager->pipe != crtc)
+ return;
if (manager_ops->disable_vblank)
manager_ops->disable_vblank(manager->dev);
}
-void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
- void *data)
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
{
- struct exynos_drm_manager *manager =
- to_exynos_encoder(encoder)->manager;
- struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- int zpos = DEFAULT_ZPOS;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_manager *manager = exynos_encoder->manager;
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ int mode = *(int *)data;
- if (data)
- zpos = *(int *)data;
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev, zpos);
+ if (manager_ops && manager_ops->dpms)
+ manager_ops->dpms(manager->dev, mode);
+
+ /*
+ * if this condition is ok then it means that the crtc is already
+ * detached from encoder and last function for detaching is properly
+ * done, so clear pipe from manager to prevent repeated call.
+ */
+ if (mode > DRM_MODE_DPMS_ON) {
+ if (!encoder->crtc)
+ manager->pipe = -1;
+ }
}
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
- int crtc = *(int *)data;
- int zpos = DEFAULT_ZPOS;
+ int pipe = *(int *)data;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -366,76 +367,62 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
* when crtc is detached from encoder, this pipe is used
* to select manager operation
*/
- manager->pipe = crtc;
-
- exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+ manager->pipe = pipe;
}
-void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data)
{
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- int mode = *(int *)data;
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ struct exynos_drm_overlay *overlay = data;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_drm_encoder_dpms(encoder, mode);
-
- exynos_encoder->dpms = mode;
+ if (overlay_ops && overlay_ops->mode_set)
+ overlay_ops->mode_set(manager->dev, overlay);
}
-void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data)
{
- struct drm_device *dev = encoder->dev;
- struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
- struct exynos_drm_manager *manager = exynos_encoder->manager;
- struct exynos_drm_manager_ops *manager_ops = manager->ops;
- struct drm_connector *connector;
- int mode = *(int *)data;
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
+ struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (manager_ops && manager_ops->dpms)
- manager_ops->dpms(manager->dev, mode);
-
- /*
- * set current dpms mode to the connector connected to
- * current encoder. connector->dpms would be checked
- * at drm_helper_connector_dpms()
- */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- connector->dpms = mode;
+ if (data)
+ zpos = *(int *)data;
- /*
- * if this condition is ok then it means that the crtc is already
- * detached from encoder and last function for detaching is properly
- * done, so clear pipe from manager to prevent repeated call.
- */
- if (mode > DRM_MODE_DPMS_ON) {
- if (!encoder->crtc)
- manager->pipe = -1;
- }
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev, zpos);
}
-void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
- struct exynos_drm_overlay *overlay = data;
+ int zpos = DEFAULT_ZPOS;
- if (overlay_ops && overlay_ops->mode_set)
- overlay_ops->mode_set(manager->dev, overlay);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (data)
+ zpos = *(int *)data;
+
+ if (overlay_ops && overlay_ops->enable)
+ overlay_ops->enable(manager->dev, zpos);
}
-void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
int zpos = DEFAULT_ZPOS;
- DRM_DEBUG_KMS("\n");
+ DRM_DEBUG_KMS("%s\n", __FILE__);
if (data)
zpos = *(int *)data;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index eb7d231..6470d9d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -40,13 +40,11 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
void (*fn)(struct drm_encoder *, void *));
void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
- void *data);
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
- void *data);
void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
-void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 7b47330..41009e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -49,10 +49,19 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
-static int check_fb_gem_memory_type(struct exynos_drm_gem_obj *exynos_gem_obj)
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
{
+ struct exynos_drm_private *private = drm_dev->dev_private;
unsigned int flags;
+ /*
+ * if exynos drm driver supports iommu then framebuffer can use
+ * all the buffer types.
+ */
+ if (private->vmm)
+ return 0;
+
flags = exynos_gem_obj->flags;
/* not support physically non-continuous memory for fb yet. TODO */
@@ -64,14 +73,63 @@ static int check_fb_gem_memory_type(struct exynos_drm_gem_obj *exynos_gem_obj)
return 0;
}
+static int check_fb_gem_size(struct drm_device *drm_dev,
+ struct drm_framebuffer *fb,
+ unsigned int nr)
+{
+ unsigned long fb_size;
+ struct drm_gem_object *obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+
+ /* in case of RGB format, only one plane is used. */
+ if (nr < 2) {
+ exynos_gem_obj = exynos_fb->exynos_gem_obj[0];
+ obj = &exynos_gem_obj->base;
+ fb_size = fb->pitches[0] * fb->height;
+
+ if (fb_size != exynos_gem_obj->packed_size) {
+ DRM_ERROR("invalid fb or gem size.\n");
+ return -EINVAL;
+ }
+ /* in case of NV12MT, YUV420M and so on, two and three planes. */
+ } else {
+ unsigned int i;
+
+ for (i = 0; i < nr; i++) {
+ exynos_gem_obj = exynos_fb->exynos_gem_obj[i];
+ obj = &exynos_gem_obj->base;
+ fb_size = fb->pitches[i] * fb->height;
+
+ if (fb_size != exynos_gem_obj->packed_size) {
+ DRM_ERROR("invalid fb or gem size.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ unsigned int i;
DRM_DEBUG_KMS("%s\n", __FILE__);
drm_framebuffer_cleanup(fb);
+ for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+ struct drm_gem_object *obj;
+
+ if (exynos_fb->exynos_gem_obj[i] == NULL)
+ continue;
+
+ obj = &exynos_fb->exynos_gem_obj[i]->base;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
kfree(exynos_fb);
exynos_fb = NULL;
}
@@ -117,7 +175,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
exynos_gem_obj = to_exynos_gem_obj(obj);
- ret = check_fb_gem_memory_type(exynos_gem_obj);
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
if (ret < 0) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
return ERR_PTR(-EINVAL);
@@ -149,8 +207,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_gem_object *obj;
struct drm_framebuffer *fb;
struct exynos_drm_fb *exynos_fb;
- int nr;
- int i;
+ int nr, i, ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -160,11 +217,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb))
+ if (IS_ERR(fb)) {
+ drm_gem_object_unreference_unlocked(obj);
return fb;
+ }
exynos_fb = to_exynos_fb(fb);
nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -181,11 +238,9 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
exynos_gem_obj = to_exynos_gem_obj(obj);
- ret = check_fb_gem_memory_type(exynos_gem_obj);
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
if (ret < 0) {
DRM_ERROR("cannot use this gem memory type for fb.\n");
exynos_drm_fb_destroy(fb);
@@ -195,6 +250,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
+ ret = check_fb_gem_size(dev, fb, nr);
+ if (ret < 0) {
+ exynos_drm_fb_destroy(fb);
+ return ERR_PTR(ret);
+ }
+
return fb;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index d5586cc..df34adf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -82,9 +82,9 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
offset += fbi->var.yoffset * fb->pitches[0];
- dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
+ dev->mode_config.fb_base = (resource_size_t)buffer->paddr;
fbi->screen_base = buffer->kvaddr + offset;
- fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
+ fbi->fix.smem_start = (unsigned long)(buffer->paddr + offset);
fbi->screen_size = size;
fbi->fix.smem_len = size;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 0000000..fdf5be8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,1925 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include "drmP.h"
+#include "drm_backlight.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/cma.h>
+#include <plat/map-base.h>
+
+#include "regs-fimc.h"
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_fimc.h"
+
+/*
+ * FIMC is stand for Fully Interactive Mobile Camera and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * FIMC supports image rotation and image effect functions.
+ */
+
+#define FIMC_MAX_DEVS 4
+#define FIMC_MAX_SRC 2
+#define FIMC_MAX_DST 32
+#ifdef CONFIG_SLP_DISP_DEBUG
+#define FIMC_MAX_REG 128
+#define FIMC_BASE_REG(id) (0x11800000 + (0x10000 * id))
+#endif
+#define FIMC_CLK_RATE 166750000
+#define FIMC_BUF_STOP 1
+#define FIMC_BUF_START 2
+#define FIMC_REG_SZ 32
+#define FIMC_WIDTH_ITU_709 1280
+
+#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct fimc_context, ippdrv);
+#define fimc_read(offset) readl(ctx->regs + (offset));
+#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset));
+
+enum fimc_wb {
+ FIMC_WB_NONE,
+ FIMC_WB_A,
+ FIMC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @bypass: unused scaler path.
+ * @up_h: horizontal scale up.
+ * @up_v: vertical scale up.
+ * @hratio: horizontal ratio.
+ * @vratio: vertical ratio.
+ */
+struct fimc_scaler {
+ bool range;
+ bool bypass;
+ bool up_h;
+ bool up_v;
+ u32 hratio;
+ u32 vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * find user manual table 43-1.
+ * @in_hori: scaler input horizontal size.
+ * @bypass: scaler bypass mode.
+ * @dst_h_wo_rot: target horizontal size without output rotation.
+ * @dst_h_rot: target horizontal size with output rotation.
+ * @rl_w_wo_rot: real width without input rotation.
+ * @rl_h_rot: real height without output rotation.
+ */
+struct fimc_capability {
+ /* scaler */
+ u32 in_hori;
+ u32 bypass;
+ /* output rotator */
+ u32 dst_h_wo_rot;
+ u32 dst_h_rot;
+ /* input rotator */
+ u32 rl_w_wo_rot;
+ u32 rl_h_rot;
+};
+
+/*
+ * A structure of fimc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @sclk_fimc_clk: fimc source clock.
+ * @fimc_clk: fimc clock.
+ * @wb_clk: writeback a clock.
+ * @wb_b_clk: writeback b clock.
+ * @sc: scaler infomations.
+ * @capa: scaler capability.
+ * @odr: ordering of YUV.
+ * @ver: fimc version.
+ * @pol: porarity of writeback.
+ * @id: fimc id.
+ * @irq: irq number.
+ * @suspended: qos operations.
+ */
+struct fimc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *sclk_fimc_clk;
+ struct clk *fimc_clk;
+ struct clk *wb_clk;
+ struct clk *wb_b_clk;
+ struct fimc_scaler sc;
+ struct fimc_capability *capa;
+ enum exynos_drm_fimc_ver ver;
+ struct exynos_drm_fimc_pol pol;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+struct fimc_capability fimc51_capa[FIMC_MAX_DEVS] = {
+ {
+ .in_hori = 4224,
+ .bypass = 8192,
+ .dst_h_wo_rot = 4224,
+ .dst_h_rot = 1920,
+ .rl_w_wo_rot = 8192,
+ .rl_h_rot = 1920,
+ }, {
+ .in_hori = 4224,
+ .bypass = 8192,
+ .dst_h_wo_rot = 4224,
+ .dst_h_rot = 1920,
+ .rl_w_wo_rot = 8192,
+ .rl_h_rot = 1920,
+ }, {
+ .in_hori = 4224,
+ .bypass = 8192,
+ .dst_h_wo_rot = 4224,
+ .dst_h_rot = 1920,
+ .rl_w_wo_rot = 8192,
+ .rl_h_rot = 1920,
+ }, {
+ .in_hori = 1920,
+ .bypass = 8192,
+ .dst_h_wo_rot = 1920,
+ .dst_h_rot = 1366,
+ .rl_w_wo_rot = 8192,
+ .rl_h_rot = 1366,
+ },
+};
+
+static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
+
+ cfg = fimc_read(EXYNOS_CISRCFMT);
+ cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
+ if (pattern)
+ cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
+
+ fimc_write(cfg, EXYNOS_CISRCFMT);
+
+ /* s/w reset */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= (EXYNOS_CIGCTRL_SWRST);
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* s/w reset complete */
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~EXYNOS_CIGCTRL_SWRST;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+
+ /* reset sequence */
+ fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+}
+
+static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
+{
+ u32 camblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ camblk_cfg = readl(SYSREG_CAMERA_BLK);
+ camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
+ camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
+
+ writel(camblk_cfg, SYSREG_CAMERA_BLK);
+}
+
+static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
+ EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
+ EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
+
+ switch (wb) {
+ case FIMC_WB_A:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_B:
+ cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
+ EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
+ break;
+ case FIMC_WB_NONE:
+ default:
+ cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
+ EXYNOS_CIGCTRL_SELWRITEBACK_A |
+ EXYNOS_CIGCTRL_SELCAM_MIPI_A |
+ EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
+ break;
+ }
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_set_polarity(struct fimc_context *ctx,
+ struct exynos_drm_fimc_pol *pol)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
+ __func__, pol->inv_pclk, pol->inv_vsync);
+ DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
+ __func__, pol->inv_href, pol->inv_hsync);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
+ EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
+
+ if (pol->inv_pclk)
+ cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
+ if (pol->inv_vsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
+ if (pol->inv_href)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
+ if (pol->inv_hsync)
+ cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
+ else
+ cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
+ bool overflow, bool level)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, level);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ if (enable) {
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
+ if (overflow)
+ cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
+ if (level)
+ cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
+ } else
+ cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
+
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static void fimc_clear_irq(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CIGCTRL);
+ cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
+ fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+static bool fimc_check_ovf(struct fimc_context *ctx)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg, status, flag;
+
+ status = fimc_read(EXYNOS_CISTATUS);
+ flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
+ EXYNOS_CISTATUS_OVFICR;
+
+ DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
+
+ if (status & flag) {
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ cfg = fimc_read(EXYNOS_CIWDOFST);
+ cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
+ EXYNOS_CIWDOFST_CLROVFICR);
+
+ fimc_write(cfg, EXYNOS_CIWDOFST);
+
+ dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+ ctx->id, status);
+ return true;
+ }
+
+ return false;
+}
+
+static bool fimc_check_frame_end(struct fimc_context *ctx)
+{
+ u32 cfg;
+
+ cfg = fimc_read(EXYNOS_CISTATUS);
+
+ DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
+
+ if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
+ return false;
+
+ cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
+ fimc_write(cfg, EXYNOS_CISTATUS);
+
+ return true;
+}
+
+static int fimc_get_buf_id(struct fimc_context *ctx)
+{
+ u32 cfg;
+ int frame_cnt, buf_id;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ cfg = fimc_read(EXYNOS_CISTATUS2);
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
+
+ if (frame_cnt == 0)
+ frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
+
+ DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+
+ if (frame_cnt == 0) {
+ DRM_ERROR("failed to get frame count.\n");
+ return -EIO;
+ }
+
+ buf_id = frame_cnt - 1;
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ return buf_id;
+}
+
+static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ cfg = fimc_read(EXYNOS_CIOCTRL);
+ if (enable)
+ cfg |= EXYNOS_CIOCTRL_LASTENDEN;
+ else
+ cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
+
+ fimc_write(cfg, EXYNOS_CIOCTRL);
+}
+
+static int fimc_set_planar_addr(struct drm_exynos_ipp_buf_info *buf_info,
+ u32 fmt, struct drm_exynos_sz *sz)
+{
+ dma_addr_t *y_addr = &buf_info->base[EXYNOS_DRM_PLANAR_Y];
+ dma_addr_t *cb_addr = &buf_info->base[EXYNOS_DRM_PLANAR_CB];
+ dma_addr_t *cr_addr = &buf_info->base[EXYNOS_DRM_PLANAR_CR];
+ uint64_t y_ofs, cb_ofs, cr_ofs;
+
+ /*
+ * ToDo: check the buffer size between gem allocated buffers
+ * and each planar size.
+ */
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ y_ofs = sz->hsize * sz->vsize;
+ cb_ofs = y_ofs >> 1;
+ cr_ofs = 0;
+ break;
+ case DRM_FORMAT_NV12M:
+ y_ofs = ALIGN(ALIGN(sz->hsize, 16) *
+ ALIGN(sz->vsize, 16), SZ_2K);
+ cb_ofs = ALIGN(ALIGN(sz->hsize, 16) *
+ ALIGN(sz->vsize >> 1, 16), SZ_2K);
+ cr_ofs = 0;
+ break;
+ case DRM_FORMAT_NV12MT:
+ y_ofs = ALIGN(ALIGN(sz->hsize, 128) *
+ ALIGN(sz->vsize, 32), SZ_8K);
+ cb_ofs = ALIGN(ALIGN(sz->hsize, 128) *
+ ALIGN(sz->vsize >> 1, 32), SZ_8K);
+ cr_ofs = 0;
+ break;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ case DRM_FORMAT_YUV420M:
+ y_ofs = sz->hsize * sz->vsize;
+ cb_ofs = cr_ofs = y_ofs >> 2;
+ break;
+ default:
+ y_ofs = cb_ofs = cr_ofs = 0;
+ break;
+ }
+
+ if (y_ofs && *y_addr) {
+ *cb_addr = *y_addr + y_ofs;
+
+ if (cb_ofs && *cb_addr)
+ *cr_addr = *cb_addr + cb_ofs;
+ }
+
+ return 0;
+}
+
+static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ /* RGB */
+ cfg = fimc_read(EXYNOS_CISCCTRL);
+ cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
+ fimc_write(cfg, EXYNOS_CISCCTRL);
+ return 0;
+ default:
+ /* bypass */
+ break;
+ }
+
+ /* YUV */
+ cfg = fimc_read(EXYNOS_MSCTRL);
+ cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE |
+ EXYNOS_MSCTRL_ORDER422_YCBYCR);
+
+ switch (fmt) {
+ case DRM_FORMAT_YUYV:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
+ break;
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV444:
+ cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12MT:
+ case DRM_FORMAT_NV16:
+ cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
+ EXYNOS_MSCTRL_C_INT_IN_2PLANE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ fimc_write(cfg, EXYNOS_MSCTRL);
+
+ return 0;
+}
+
+/*
+ * fimc_src_set_fmt - select the input color space / plane count for @fmt.
+ *
+ * Programs MSCTRL (RGB vs YCbCr420/422 input), enables 64x32 tiled DMA
+ * reads for NV12MT, then delegates the byte-order details to
+ * fimc_src_set_fmt_order().
+ *
+ * Returns 0 on success, -EINVAL for an unsupported format.
+ */
+static int fimc_src_set_fmt(struct device *dev, u32 fmt)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = fimc_read(EXYNOS_MSCTRL);
+	cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
+		break;
+	case DRM_FORMAT_YUV444:
+		/* NOTE(review): YUV444 mapped to the YCBCR420 input mode —
+		 * looks intentional (matches the order table) but verify
+		 * against the FIMC manual. */
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+		break;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
+		break;
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
+		break;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV12M:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV12MT:
+		cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_MSCTRL);
+
+	/* NV12MT is stored as 64x32 macro-tiles; everything else is linear */
+	cfg = fimc_read(EXYNOS_CIDMAPARAM);
+	cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
+
+	if (fmt == DRM_FORMAT_NV12MT)
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
+
+	fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+	return fimc_src_set_fmt_order(ctx, fmt);
+}
+
+/*
+ * fimc_src_set_transf - program input rotation and mirroring.
+ *
+ * Fix: EXYNOS_CITRGFMT_INROT90_CLOCKWISE is a bit of the CITRGFMT
+ * register, but the original code OR'd it into the MSCTRL value and
+ * never touched CITRGFMT at all — so 90/270 degree rotation set a
+ * stray bit in the wrong register. Keep the flip bits in MSCTRL and
+ * the rotation bit in CITRGFMT, each with its own read-modify-write.
+ *
+ * Returns 1 when the 90-degree rotator is enabled (caller must swap
+ * width/height), 0 otherwise, -EINVAL for an unknown degree.
+ */
+static int fimc_src_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg1, cfg2;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg1 = fimc_read(EXYNOS_MSCTRL);
+	cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
+		EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+
+	cfg2 = fimc_read(EXYNOS_CITRGFMT);
+	cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		/* 180 degrees == both mirrors; an additional flip cancels one */
+		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		/* 270 degrees == rotate 90 plus both mirrors */
+		cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
+		cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
+			EXYNOS_MSCTRL_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg1, EXYNOS_MSCTRL);
+	fimc_write(cfg2, EXYNOS_CITRGFMT);
+
+	return (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
+}
+
+/*
+ * fimc_set_window - program the input crop window.
+ *
+ * h1/v1 are the left/top offsets of @pos inside the full source image
+ * @sz; h2/v2 are the remaining right/bottom margins. Both offset pairs
+ * are written to CIWDOFST/CIWDOFST2 and window offsetting is enabled.
+ * Always returns 0.
+ */
+static int fimc_set_window(struct fimc_context *ctx,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	u32 cfg, h1, h2, v1, v2;
+
+	/* cropped image */
+	h1 = pos->x;
+	h2 = sz->hsize - pos->w - pos->x;
+	v1 = pos->y;
+	v2 = sz->vsize - pos->h - pos->y;
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+	__func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
+	DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
+		h1, h2, v1, v2);
+
+	/*
+	 * set window offset 1, 2 size
+	 * check figure 43-21 in user manual
+	 */
+	cfg = fimc_read(EXYNOS_CIWDOFST);
+	cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
+		EXYNOS_CIWDOFST_WINVEROFST_MASK);
+	cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
+		EXYNOS_CIWDOFST_WINVEROFST(v1));
+	cfg |= EXYNOS_CIWDOFST_WINOFSEN;
+	fimc_write(cfg, EXYNOS_CIWDOFST);
+
+	cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
+		EXYNOS_CIWDOFST2_WINVEROFST2(v2));
+	fimc_write(cfg, EXYNOS_CIWDOFST2);
+
+	return 0;
+}
+
+/*
+ * fimc_src_set_size - program the input image geometry.
+ *
+ * @swap: non-zero when a 90/270 degree rotation is active, in which
+ *        case width/height (both position and size) are exchanged
+ *        before being written to the hardware.
+ *
+ * Writes the original size (ORGISIZE), the real DMA input size
+ * (CIREAL_ISIZE), the FIFO source size (CISRCFMT, ITU601 8-bit only),
+ * caches the effective geometry in the src config, and finally
+ * programs the crop window.
+ */
+static int fimc_src_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *config =
+		&property->config[EXYNOS_DRM_OPS_SRC];
+	struct drm_exynos_pos img_pos = *pos;
+	struct drm_exynos_sz img_sz = *sz;
+	u32 cfg;
+
+	/* ToDo: check width and height */
+
+	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+		__func__, swap, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
+		EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
+
+	fimc_write(cfg, EXYNOS_ORGISIZE);
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
+		pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+		img_sz.hsize = sz->vsize;
+		img_sz.vsize = sz->hsize;
+	}
+
+	/* set input DMA image size */
+	cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
+	cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
+		EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
+	cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_sz.hsize) |
+		EXYNOS_CIREAL_ISIZE_HEIGHT(img_sz.vsize));
+	fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
+
+	/*
+	 * set input FIFO image size
+	 * for now, we support only ITU601 8 bit mode
+	 */
+	cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
+		EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
+		EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
+	fimc_write(cfg, EXYNOS_CISRCFMT);
+
+	/* cache the (possibly swapped) geometry for later addr setup */
+	config->sz = img_sz;
+	config->pos = img_pos;
+
+	return fimc_set_window(ctx, &img_pos, &img_sz);
+}
+
+/*
+ * fimc_src_set_addr - latch the Y/Cb/Cr base addresses of an input buffer.
+ *
+ * Only QUEUE/DEQUEUE control ops program the hardware; anything else is
+ * a no-op. YVU420 has Cb and Cr swapped relative to the register layout,
+ * hence the cross-wired writes.
+ *
+ * Fix: an out-of-range buf_id is a caller error, not an allocation
+ * failure — return -EINVAL instead of -ENOMEM.
+ */
+static int fimc_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_ctrl buf_ctrl)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *config =
+		&property->config[EXYNOS_DRM_OPS_SRC];
+	int ret;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_ctrl[%d]\n", __func__,
+		buf_id, buf_ctrl);
+
+	if (buf_id > FIMC_MAX_SRC) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+	case IPP_BUF_CTRL_DEQUEUE:
+		ret = fimc_set_planar_addr(buf_info, config->fmt, &config->sz);
+
+		if (ret) {
+			dev_err(dev, "failed to set plane addr.\n");
+			return ret;
+		}
+
+		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			EXYNOS_CIIYSA(buf_id));
+
+		if (config->fmt == DRM_FORMAT_YVU420) {
+			/* YVU420: chroma planes are swapped vs. the registers */
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIICBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIICRSA(buf_id));
+		} else {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIICBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIICRSA(buf_id));
+		}
+		break;
+	default:
+		/* no address programming needed for other buf ctrl ops */
+		break;
+	}
+
+	return 0;
+}
+
+/* Source-side (input DMA) operations exposed to the exynos IPP core. */
+static struct exynos_drm_ipp_ops fimc_src_ops = {
+	.set_fmt = fimc_src_set_fmt,
+	.set_transf = fimc_src_set_transf,
+	.set_size = fimc_src_set_size,
+	.set_addr = fimc_src_set_addr,
+};
+
+/*
+ * fimc_dst_set_fmt_order - program the output pixel/byte ordering for @fmt.
+ *
+ * RGB565/RGB888 are fully described by CISCCTRL and return early.
+ * XRGB8888 sets the RGB extension in CISCCTRL but deliberately falls
+ * through (break, not return) so the YUV section can also enable
+ * ALPHA_OUT in CIOCTRL. All other YUV formats configure CIOCTRL only.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported YUV format.
+ */
+static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	/* RGB */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_RGB888:
+		cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		return 0;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
+			EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
+		fimc_write(cfg, EXYNOS_CISCCTRL);
+		/* intentionally no return: ALPHA_OUT is set below */
+		break;
+	default:
+		/* not RGB: fall through to the YUV handling below */
+		break;
+	}
+
+	/* YUV */
+	cfg = fimc_read(EXYNOS_CIOCTRL);
+	cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
+		EXYNOS_CIOCTRL_ORDER422_MASK |
+		EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
+
+	switch (fmt) {
+	case DRM_FORMAT_XRGB8888:
+		cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
+		break;
+	case DRM_FORMAT_UYVY:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
+		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV12M:
+	case DRM_FORMAT_NV12MT:
+	case DRM_FORMAT_NV16:
+		cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
+		cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_CIOCTRL);
+
+	return 0;
+}
+
+/*
+ * fimc_dst_set_fmt - select the output color space / plane count for @fmt.
+ *
+ * AYUV uses the dedicated YUV444 output path in CIEXTEN; all other
+ * formats go through CITRGFMT. NV12MT enables 64x32 tiled DMA writes.
+ * Byte-order details are delegated to fimc_dst_set_fmt_order().
+ *
+ * Returns 0 on success, -EINVAL for an unsupported format.
+ */
+static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	cfg = fimc_read(EXYNOS_CIEXTEN);
+
+	if (fmt == DRM_FORMAT_AYUV) {
+		cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
+		fimc_write(cfg, EXYNOS_CIEXTEN);
+	} else {
+		cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
+		fimc_write(cfg, EXYNOS_CIEXTEN);
+
+		cfg = fimc_read(EXYNOS_CITRGFMT);
+		cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
+
+		switch (fmt) {
+		case DRM_FORMAT_RGB565:
+		case DRM_FORMAT_RGB888:
+		case DRM_FORMAT_XRGB8888:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
+			break;
+		case DRM_FORMAT_YUYV:
+		case DRM_FORMAT_YVYU:
+		case DRM_FORMAT_UYVY:
+		case DRM_FORMAT_VYUY:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
+			break;
+		case DRM_FORMAT_NV16:
+		case DRM_FORMAT_NV61:
+		case DRM_FORMAT_YUV422:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
+			break;
+		case DRM_FORMAT_YUV420:
+		case DRM_FORMAT_YVU420:
+		case DRM_FORMAT_NV12:
+		case DRM_FORMAT_NV12M:
+		case DRM_FORMAT_NV12MT:
+		case DRM_FORMAT_NV21:
+			cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
+			break;
+		default:
+			dev_err(ippdrv->dev, "invalid target format 0x%x.\n",
+				fmt);
+			return -EINVAL;
+		}
+
+		fimc_write(cfg, EXYNOS_CITRGFMT);
+	}
+
+	/* NV12MT is stored as 64x32 macro-tiles; everything else is linear */
+	cfg = fimc_read(EXYNOS_CIDMAPARAM);
+	cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
+
+	if (fmt == DRM_FORMAT_NV12MT)
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
+	else
+		cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
+
+	fimc_write(cfg, EXYNOS_CIDMAPARAM);
+
+	return fimc_dst_set_fmt_order(ctx, fmt);
+}
+
+/*
+ * fimc_dst_set_transf - program output rotation and mirroring (CITRGFMT).
+ *
+ * 180/270 degrees are expressed as 90-degree rotation plus both mirror
+ * bits; an additional user flip then cancels the corresponding mirror.
+ *
+ * Returns 1 when the 90-degree rotator is enabled (caller must swap
+ * width/height), 0 otherwise, -EINVAL for an unknown degree.
+ */
+static int fimc_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = fimc_read(EXYNOS_CITRGFMT);
+	cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
+	cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
+			EXYNOS_CITRGFMT_FLIP_X_MIRROR |
+			EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	fimc_write(cfg, EXYNOS_CITRGFMT);
+
+	return (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
+}
+
+/*
+ * fimc_get_ratio_shift - pick the power-of-two pre-scale ratio for a
+ * src -> dst downscale, returning it as (*ratio, *shift == log2 ratio).
+ *
+ * More than 64:1 cannot be handled and fails with -EINVAL; less than
+ * 2:1 (including upscaling) yields ratio 1 / shift 0.
+ */
+static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+	u32 r, s;
+
+	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+	if (src >= dst * 64) {
+		DRM_ERROR("failed to make ratio and shift.\n");
+		return -EINVAL;
+	}
+
+	/* walk 32,16,8,4,2 and stop at the largest ratio that fits;
+	 * falls out at 1/0 when even 2:1 does not fit */
+	for (r = 32, s = 5; r > 1; r >>= 1, s--) {
+		if (src >= dst * r)
+			break;
+	}
+
+	*ratio = r;
+	*shift = s;
+
+	return 0;
+}
+
+/*
+ * fimc_set_prescaler - configure the pre-scaler and derive the main
+ * scaler ratios into @sc.
+ *
+ * The pre-scaler first reduces the image by power-of-two ratios
+ * (CISCPRERATIO/CISCPREDST); the residual scaling is expressed as
+ * 14-bit fixed-point h/v ratios for the main scaler, cached in @sc
+ * for fimc_set_scaler().
+ */
+static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
+	struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg, shfactor;
+	u32 pre_dst_width, pre_dst_height;
+	u32 pre_hratio, hfactor, pre_vratio, vfactor;
+	int ret = 0;
+
+	ret = fimc_get_ratio_shift(src->w, dst->w, &pre_hratio, &hfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		return ret;
+	}
+
+	ret = fimc_get_ratio_shift(src->h, dst->h, &pre_vratio, &vfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		return ret;
+	}
+
+	/* image size after the power-of-two pre-scale stage */
+	pre_dst_width = src->w / pre_hratio;
+	pre_dst_height = src->h / pre_vratio;
+	DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
+		pre_dst_width, pre_dst_height);
+	DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+		__func__, pre_hratio, hfactor, pre_vratio, vfactor);
+
+	/* main scaler ratios in 14-bit fixed point; up_h/up_v mark upscaling */
+	sc->hratio = (src->w << 14) / (dst->w << hfactor);
+	sc->vratio = (src->h << 14) / (dst->h << vfactor);
+	sc->up_h = (dst->w >= src->w) ? true : false;
+	sc->up_v = (dst->h >= src->h) ? true : false;
+	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+	__func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+
+	/* shift factor per the FIMC manual: 10 minus total shift amount */
+	shfactor = 10 - (hfactor + vfactor);
+	DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
+
+	cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
+		EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
+		EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
+	fimc_write(cfg, EXYNOS_CISCPRERATIO);
+
+	cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
+		EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
+	fimc_write(cfg, EXYNOS_CISCPREDST);
+
+	return ret;
+}
+
+/*
+ * fimc_set_scaler - program the main scaler from the cached @sc state.
+ *
+ * CISCCTRL takes the upper 8 bits of the 14-bit ratios plus the
+ * bypass/upscale/wide-range flags; the low 6 bits of each ratio go
+ * into the CIEXTEN extension fields.
+ */
+static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
+{
+	u32 cfg, cfg_ext;
+
+	DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+		__func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
+	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
+		__func__, sc->hratio, sc->vratio);
+
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
+		EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
+		EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
+		EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
+		EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+		EXYNOS_CISCCTRL_CSCY2R_WIDE);
+
+	/* wide-range color space conversion in both directions */
+	if (sc->range)
+		cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
+			EXYNOS_CISCCTRL_CSCY2R_WIDE);
+	if (sc->bypass)
+		cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
+	if (sc->up_h)
+		cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
+	if (sc->up_v)
+		cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
+
+	/* main ratio: upper bits here, lower 6 bits in CIEXTEN below */
+	cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
+		EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
+	fimc_write(cfg, EXYNOS_CISCCTRL);
+
+	cfg_ext = fimc_read(EXYNOS_CIEXTEN);
+	cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
+	cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
+	cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
+		EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
+	fimc_write(cfg_ext, EXYNOS_CIEXTEN);
+}
+
+/*
+ * fimc_dst_set_size - program the output image geometry.
+ *
+ * @swap: non-zero when a 90/270 degree rotation is active; width/height
+ *        of both position and size are exchanged before programming.
+ *
+ * Writes the original output size (ORGOSIZE), picks ITU601 vs ITU709
+ * CSC coefficients by output width, sets the target size/area and the
+ * per-plane x/y offsets, and caches the effective geometry in the dst
+ * config. Always returns 0.
+ */
+static int fimc_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *config =
+		&property->config[EXYNOS_DRM_OPS_DST];
+	struct drm_exynos_pos img_pos = *pos;
+	struct drm_exynos_sz img_sz = *sz;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+		__func__, swap, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
+		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
+
+	fimc_write(cfg, EXYNOS_ORGOSIZE);
+
+	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, pos->x, pos->y, pos->w, pos->h);
+
+	/* CSC ITU: wide frames use the HD (ITU709) coefficients */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
+
+	if (sz->hsize >= FIMC_WIDTH_ITU_709)
+		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
+	else
+		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
+
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+		img_sz.hsize = sz->vsize;
+		img_sz.vsize = sz->hsize;
+	}
+
+	/* target image size */
+	cfg = fimc_read(EXYNOS_CITRGFMT);
+	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
+		EXYNOS_CITRGFMT_TARGETV_MASK);
+	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
+		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
+	fimc_write(cfg, EXYNOS_CITRGFMT);
+
+	/* target area */
+	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
+	fimc_write(cfg, EXYNOS_CITAREA);
+
+	/* ToDo: Move Scaler in this line and YUV */
+	config->sz = img_sz;
+	config->pos = img_pos;
+
+	/* offset Y(RGB), Cb, Cr */
+	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOYOFF);
+	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOCBOFF);
+	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
+		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
+	fimc_write(cfg, EXYNOS_CIOCROFF);
+
+	return 0;
+}
+
+/*
+ * fimc_dst_get_buf_seq - count the destination buffers currently
+ * enabled in the CIFCNTSEQ frame-sequence mask register.
+ */
+static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
+{
+	u32 cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+	u32 bit;
+	int buf_num = 0;
+
+	/* one register bit per buffer slot */
+	for (bit = 0; bit < FIMC_REG_SZ; bit++)
+		buf_num += (cfg >> bit) & 0x1;
+
+	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+	return buf_num;
+}
+
+/*
+ * fimc_dst_set_buf_seq - enable/disable one destination buffer slot in
+ * the CIFCNTSEQ mask and manage the frame-end interrupt accordingly.
+ *
+ * QUEUE sets the buffer's bit, DEQUEUE clears it. The interrupt is
+ * enabled once enough buffers are queued (FIMC_BUF_START) and disabled
+ * again when the queue drains to FIMC_BUF_STOP.
+ */
+static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_ctrl buf_ctrl)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool enable;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_ctrl[%d]\n", __func__,
+		buf_id, buf_ctrl);
+
+	/* mask register set */
+	cfg = fimc_read(EXYNOS_CIFCNTSEQ);
+
+	switch (buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+		enable = true;
+		break;
+	case IPP_BUF_CTRL_DEQUEUE:
+		enable = false;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		return -EINVAL;
+	}
+
+	/* sequence id */
+	cfg &= (~mask);
+	cfg |= (enable << buf_id);
+	fimc_write(cfg, EXYNOS_CIFCNTSEQ);
+
+	/* interrupt enable */
+	if (buf_ctrl == IPP_BUF_CTRL_QUEUE &&
+	    fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
+		fimc_handle_irq(ctx, true, false, true);
+
+	/* interrupt disable */
+	if (buf_ctrl == IPP_BUF_CTRL_DEQUEUE &&
+	    fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+		fimc_handle_irq(ctx, false, false, true);
+
+	return 0;
+}
+
+/*
+ * fimc_dst_set_addr - latch the Y/Cb/Cr base addresses of an output
+ * buffer and update the buffer sequence mask.
+ *
+ * Mirrors fimc_src_set_addr() for the output side, including the
+ * swapped Cb/Cr writes for YVU420.
+ *
+ * Fix: an out-of-range buf_id is a caller error, not an allocation
+ * failure — return -EINVAL instead of -ENOMEM.
+ */
+static int fimc_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_ctrl buf_ctrl)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *config =
+		&property->config[EXYNOS_DRM_OPS_DST];
+	int ret;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_ctrl[%d]\n", __func__,
+		buf_id, buf_ctrl);
+
+	if (buf_id > FIMC_MAX_DST) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+	case IPP_BUF_CTRL_DEQUEUE:
+		ret = fimc_set_planar_addr(buf_info, config->fmt, &config->sz);
+
+		if (ret) {
+			dev_err(dev, "failed to set plane addr.\n");
+			return ret;
+		}
+
+		fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			EXYNOS_CIOYSA(buf_id));
+
+		if (config->fmt == DRM_FORMAT_YVU420) {
+			/* YVU420: chroma planes are swapped vs. the registers */
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIOCBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIOCRSA(buf_id));
+		} else {
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+				EXYNOS_CIOCBSA(buf_id));
+			fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+				EXYNOS_CIOCRSA(buf_id));
+		}
+		break;
+	default:
+		/* no address programming needed for other buf ctrl ops */
+		break;
+	}
+
+	return fimc_dst_set_buf_seq(ctx, buf_id, buf_ctrl);
+}
+
+/* Destination-side (output DMA) operations exposed to the exynos IPP core. */
+static struct exynos_drm_ipp_ops fimc_dst_ops = {
+	.set_fmt = fimc_dst_set_fmt,
+	.set_transf = fimc_dst_set_transf,
+	.set_size = fimc_dst_set_size,
+	.set_addr = fimc_dst_set_addr,
+};
+
+/*
+ * fimc_power_on - gate the FIMC clocks and track the suspended state.
+ *
+ * NOTE(review): clk_enable() return values are ignored here — confirm
+ * that failure is impossible for these clocks or add error handling.
+ * Always returns 0.
+ */
+static int fimc_power_on(struct fimc_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+	if (enable) {
+		clk_enable(ctx->sclk_fimc_clk);
+		clk_enable(ctx->fimc_clk);
+		clk_enable(ctx->wb_clk);
+		/* ToDo : wb_b_clk */
+		ctx->suspended = false;
+	} else {
+		clk_disable(ctx->sclk_fimc_clk);
+		clk_disable(ctx->fimc_clk);
+		clk_disable(ctx->wb_clk);
+		/* ToDo : wb_b_clk */
+		ctx->suspended = true;
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_irq_handler - threaded IRQ handler for frame-end events.
+ *
+ * Clears the interrupt, bails out on overflow or when no frame has
+ * completed, then dequeues the finished buffer, notifies the IPP core,
+ * and disables the interrupt once the queue has drained.
+ */
+static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
+{
+	struct fimc_context *ctx = dev_id;
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	int buf_id;
+	u32 status1, status2;
+
+	DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
+
+	/* ToDo: must be remove debug routine */
+	status1 = fimc_read(EXYNOS_CISTATUS);
+	status2 = fimc_read(EXYNOS_CISTATUS2);
+	DRM_DEBUG_KMS("%s:status1[0x%x]status2[0x%x]\n",
+		__func__, status1, status2);
+
+	fimc_clear_irq(ctx);
+	if (fimc_check_ovf(ctx))
+		return IRQ_NONE;
+
+	if (!fimc_check_frame_end(ctx))
+		return IRQ_NONE;
+
+	/* nobody is waiting for a completion event */
+	if (list_empty(&ippdrv->event_list)) {
+		DRM_DEBUG_KMS("%s:event list empty. so ignore.\n", __func__);
+
+		return IRQ_NONE;
+	}
+
+	buf_id = fimc_get_buf_id(ctx);
+	if (buf_id < 0)
+		return IRQ_HANDLED;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+	if (fimc_dst_set_buf_seq(ctx, buf_id,
+		IPP_BUF_CTRL_DEQUEUE) < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+
+		return IRQ_HANDLED;
+	}
+
+	ipp_send_event_handler(ippdrv, buf_id);
+
+	/* queue drained below the low-water mark: stop interrupts */
+	if (fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
+		fimc_handle_irq(ctx, false, false, true);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * fimc_ippdrv_check_property - validate a requested IPP property.
+ * Currently a stub that accepts everything (see ToDo).
+ */
+static int fimc_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	/* ToDo: check valid using property information */
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return 0;
+}
+
+/*
+ * fimc_ippdrv_reset - soft-reset the FIMC block and clear the cached
+ * scaler configuration. Always returns 0.
+ */
+static int fimc_ippdrv_reset(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* reset h/w block */
+	fimc_sw_reset(ctx, false);
+
+	/* reset scaler capability */
+	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+	return 0;
+}
+
+/*
+ * fimc_check_prepare - verify the hardware is ready to start.
+ * Currently a stub that always reports ready (see ToDo).
+ */
+static int fimc_check_prepare(struct fimc_context *ctx)
+{
+	/* ToDo: check prepare using read register */
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return 0;
+}
+
+/*
+ * fimc_ippdrv_start - kick off an IPP operation.
+ *
+ * Configures the pre-scaler/scaler from the cached property, selects
+ * the input path (memory for M2M, FIMD writeback for WB), arms the
+ * capture/scaler enable bits, and for M2M pulses ENVID to start the
+ * transfer. Only M2M and WB commands are supported.
+ *
+ * Fix: "precalser" typo in the error message corrected to "prescaler".
+ */
+static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *config;
+	struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
+	int ret, i;
+	int enable = 1;
+	u32 cfg0, cfg1;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	ret = fimc_check_prepare(ctx);
+	if (ret) {
+		dev_err(dev, "failed to check prepare.\n");
+		return ret;
+	}
+
+	ippdrv->cmd = cmd;
+	fimc_handle_irq(ctx, true, false, true);
+
+	/* ToDo: window size, prescaler config */
+	for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+		config = &property->config[i];
+		img_pos[i] = config->pos;
+	}
+
+	ret = fimc_set_prescaler(ctx, &ctx->sc,
+		&img_pos[EXYNOS_DRM_OPS_SRC],
+		&img_pos[EXYNOS_DRM_OPS_DST]);
+	if (ret) {
+		dev_err(dev, "failed to set prescaler.\n");
+		return ret;
+	}
+
+	/* When set true the JPEG path would be used; plain frames here */
+	fimc_handle_jpeg(ctx, false);
+	fimc_set_scaler(ctx, &ctx->sc);
+	fimc_set_polarity(ctx, &ctx->pol);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
+		fimc_handle_lastend(ctx, false);
+
+		/* setup dma */
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
+		cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+		break;
+	case IPP_CMD_WB:
+		fimc_set_type_ctrl(ctx, FIMC_WB_A);
+		fimc_handle_lastend(ctx, true);
+
+		/* setup FIMD */
+		fimc_set_camblk_fimd0_wb(ctx);
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)enable);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		ret = -EINVAL;
+		dev_err(dev, "invalid operations.\n");
+		return ret;
+	}
+
+	/* Reset status */
+	fimc_write(0x0, EXYNOS_CISTATUS);
+
+	cfg0 = fimc_read(EXYNOS_CIIMGCPT);
+	cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
+
+	/* Scaler */
+	cfg1 = fimc_read(EXYNOS_CISCCTRL);
+	cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
+	cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
+		EXYNOS_CISCCTRL_SCALERSTART);
+
+	fimc_write(cfg1, EXYNOS_CISCCTRL);
+
+	/* Enable image capture*/
+	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
+	fimc_write(cfg0, EXYNOS_CIIMGCPT);
+
+	/* Disable frame end irq */
+	cfg0 = fimc_read(EXYNOS_CIGCTRL);
+	cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+	fimc_write(cfg0, EXYNOS_CIGCTRL);
+
+	cfg0 = fimc_read(EXYNOS_CIOCTRL);
+	cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
+	fimc_write(cfg0, EXYNOS_CIOCTRL);
+
+	/* ToDo: m2m start errata - refer fimd */
+	/* the double ENVID write below is deliberate (hardware errata) */
+	if (cmd == IPP_CMD_M2M) {
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 |= EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+
+		cfg0 = fimc_read(EXYNOS_MSCTRL);
+		cfg0 |= EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg0, EXYNOS_MSCTRL);
+	}
+
+	return 0;
+}
+
+/*
+ * fimc_ippdrv_stop - stop a running IPP operation.
+ *
+ * Tears down the input path for the given command, masks interrupts,
+ * clears the buffer sequence, stops the scaler and image capture, and
+ * re-enables (un-disables) the frame-end interrupt gate.
+ */
+static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	int enable = 0;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* Source clear */
+		cfg = fimc_read(EXYNOS_MSCTRL);
+		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
+		cfg &= ~EXYNOS_MSCTRL_ENVID;
+		fimc_write(cfg, EXYNOS_MSCTRL);
+		break;
+	case IPP_CMD_WB:
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)enable);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		dev_err(dev, "invalid operations.\n");
+		break;
+	}
+
+	ippdrv->cmd = IPP_CMD_NONE;
+	fimc_handle_irq(ctx, false, false, true);
+
+	/* reset sequence */
+	fimc_write(0x0, EXYNOS_CIFCNTSEQ);
+
+	/* Scaler disable */
+	cfg = fimc_read(EXYNOS_CISCCTRL);
+	cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
+	fimc_write(cfg, EXYNOS_CISCCTRL);
+
+	/* Disable image capture */
+	cfg = fimc_read(EXYNOS_CIIMGCPT);
+	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
+	fimc_write(cfg, EXYNOS_CIIMGCPT);
+
+	/* Enable frame end irq */
+	cfg = fimc_read(EXYNOS_CIGCTRL);
+	cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
+	fimc_write(cfg, EXYNOS_CIGCTRL);
+}
+
+/*
+ * fimc_get_capability - return the capability table for the given FIMC
+ * hardware version. Currently every version maps to fimc51_capa.
+ */
+static struct fimc_capability *fimc_get_capability(
+	enum exynos_drm_fimc_ver ver)
+{
+	struct fimc_capability *capa;
+
+	DRM_DEBUG_KMS("%s:ver[0x%x]\n", __func__, ver);
+
+	/* ToDo: version check */
+	switch (ver) {
+	case FIMC_EXYNOS_4412:
+	default:
+		capa = fimc51_capa;
+		break;
+	}
+
+	return capa;
+}
+
+#ifdef CONFIG_SLP_DISP_DEBUG
+/*
+ * fimc_read_reg - dump all FIMC registers into @buf for the sysfs
+ * read_reg attribute, four values per line prefixed by the register
+ * address. Returns the number of bytes written.
+ *
+ * NOTE(review): sprintf() into @buf is unbounded — presumably safe
+ * because sysfs show buffers are PAGE_SIZE and FIMC_MAX_REG fits;
+ * verify and consider scnprintf().
+ */
+static int fimc_read_reg(struct fimc_context *ctx, char *buf)
+{
+	u32 cfg;
+	int i;
+	int pos = 0;
+
+	pos += sprintf(buf+pos, "0x%.8x | ", FIMC_BASE_REG(ctx->id));
+	for (i = 1; i < FIMC_MAX_REG + 1; i++) {
+		cfg = fimc_read((i-1) * sizeof(u32));
+		pos += sprintf(buf+pos, "0x%.8x ", cfg);
+		if (i % 4 == 0)
+			pos += sprintf(buf+pos, "\n0x%.8x | ",
+				FIMC_BASE_REG(ctx->id) + (i * sizeof(u32)));
+	}
+
+	pos += sprintf(buf+pos, "\n");
+
+	return pos;
+}
+
+/*
+ * show_read_reg - sysfs "read_reg" show callback; refuses to run when
+ * the register base has not been mapped.
+ */
+static ssize_t show_read_reg(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	if (!ctx->regs) {
+		dev_err(dev, "failed to get current register.\n");
+		return -EINVAL;
+	}
+
+	return fimc_read_reg(ctx, buf);
+}
+
+/* Debug-only sysfs attributes (CONFIG_SLP_DISP_DEBUG). */
+static struct device_attribute device_attrs[] = {
+	__ATTR(read_reg, S_IRUGO, show_read_reg, NULL),
+};
+#endif
+
+/*
+ * fimc_probe - bind the FIMC platform device.
+ *
+ * Acquires clocks (reparenting sclk_fimc to the MPLL output), maps the
+ * register region, installs the threaded IRQ handler, fills in the
+ * ippdrv ops, and registers with the exynos IPP core.
+ *
+ * Fix: on the fimc_get_capability() failure path, @ret still held 0
+ * from the successful request_threaded_irq() call, so probe would free
+ * all resources yet report success — set an explicit error code before
+ * the goto (and likewise for the missing IRQ resource).
+ */
+static int __devinit fimc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimc_context *ctx;
+	struct clk *parent_clk;
+	struct resource *res;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct exynos_drm_fimc_pdata *pdata;
+	int ret = -EINVAL;
+#ifdef CONFIG_SLP_DISP_DEBUG
+	int i;
+#endif
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(dev, "no platform data specified.\n");
+		return -EINVAL;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	/* clock control */
+	ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
+	if (IS_ERR(ctx->sclk_fimc_clk)) {
+		dev_err(dev, "failed to get src fimc clock.\n");
+		ret = PTR_ERR(ctx->sclk_fimc_clk);
+		goto err_ctx;
+	}
+	clk_enable(ctx->sclk_fimc_clk);
+
+	ctx->fimc_clk = clk_get(dev, "fimc");
+	if (IS_ERR(ctx->fimc_clk)) {
+		dev_err(dev, "failed to get fimc clock.\n");
+		ret = PTR_ERR(ctx->fimc_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		goto err_ctx;
+	}
+
+	ctx->wb_clk = clk_get(dev, "pxl_async0");
+	if (IS_ERR(ctx->wb_clk)) {
+		dev_err(dev, "failed to get writeback a clock.\n");
+		ret = PTR_ERR(ctx->wb_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		goto err_ctx;
+	}
+
+	ctx->wb_b_clk = clk_get(dev, "pxl_async1");
+	if (IS_ERR(ctx->wb_b_clk)) {
+		dev_err(dev, "failed to get writeback b clock.\n");
+		ret = PTR_ERR(ctx->wb_b_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		clk_put(ctx->wb_clk);
+		goto err_ctx;
+	}
+
+	/* Exynos4212/4412 route MPLL through a user mux */
+	if (pdata->ver == FIMC_EXYNOS_4212 ||
+	    pdata->ver == FIMC_EXYNOS_4412)
+		parent_clk = clk_get(dev, "mout_mpll_user");
+	else
+		parent_clk = clk_get(dev, "mout_mpll");
+
+	if (IS_ERR(parent_clk)) {
+		dev_err(dev, "failed to get parent clock.\n");
+		ret = PTR_ERR(parent_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		clk_put(ctx->wb_clk);
+		clk_put(ctx->wb_b_clk);
+		goto err_ctx;
+	}
+
+	if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
+		dev_err(dev, "failed to set parent.\n");
+		clk_put(parent_clk);
+		clk_put(ctx->sclk_fimc_clk);
+		clk_put(ctx->fimc_clk);
+		clk_put(ctx->wb_clk);
+		clk_put(ctx->wb_b_clk);
+		goto err_ctx;
+	}
+	clk_put(parent_clk);
+	clk_set_rate(ctx->sclk_fimc_clk, FIMC_CLK_RATE);
+	clk_disable(ctx->sclk_fimc_clk);
+
+	/* resource memory */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "failed to find registers.\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs_res = request_mem_region(res->start, resource_size(res),
+		dev_name(dev));
+	if (!ctx->regs_res) {
+		dev_err(dev, "failed to claim register region.\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs = ioremap(res->start, resource_size(res));
+	if (!ctx->regs) {
+		dev_err(dev, "failed to map registers.\n");
+		ret = -ENXIO;
+		goto err_req_region;
+	}
+
+	/* resource irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to request irq resource.\n");
+		ret = -ENOENT;
+		goto err_get_regs;
+	}
+
+	ctx->irq = res->start;
+	ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
+		IRQF_ONESHOT, "drm_fimc", ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq.\n");
+		goto err_get_regs;
+	}
+
+	/* context initailization */
+	ctx->ver = pdata->ver;
+	ctx->id = pdev->id;
+	ctx->capa = fimc_get_capability(ctx->ver);
+	if (!ctx->capa) {
+		dev_err(dev, "failed to get capability.\n");
+		ret = -EINVAL;
+		goto err_get_irq;
+	}
+	ctx->pol = pdata->pol;
+
+#ifdef CONFIG_SLP_DISP_DEBUG
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+		ret = device_create_file(&(pdev->dev),
+					&device_attrs[i]);
+		if (ret)
+			break;
+	}
+
+	if (ret < 0)
+		dev_err(&pdev->dev, "failed to add sysfs entries\n");
+#endif
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	ippdrv = &ctx->ippdrv;
+	ippdrv->dev = dev;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
+	ippdrv->check_property = fimc_ippdrv_check_property;
+	ippdrv->reset = fimc_ippdrv_reset;
+	ippdrv->start = fimc_ippdrv_start;
+	ippdrv->stop = fimc_ippdrv_stop;
+
+	mutex_init(&ctx->lock);
+	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm fimc device.\n");
+		goto err_get_irq;
+	}
+
+	dev_info(&pdev->dev, "drm fimc registered successfully.\n");
+
+	return 0;
+
+err_get_irq:
+	free_irq(ctx->irq, ctx);
+err_get_regs:
+	iounmap(ctx->regs);
+err_req_region:
+	release_resource(ctx->regs_res);
+	kfree(ctx->regs_res);
+err_clk:
+	clk_put(ctx->sclk_fimc_clk);
+	clk_put(ctx->fimc_clk);
+	clk_put(ctx->wb_clk);
+	clk_put(ctx->wb_b_clk);
+err_ctx:
+	kfree(ctx);
+	return ret;
+}
+
+/*
+ * fimc_remove - unbind the FIMC platform device, releasing everything
+ * acquired in fimc_probe() in reverse order.
+ */
+static int __devexit fimc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimc_context *ctx = get_fimc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+	exynos_drm_ippdrv_unregister(ippdrv);
+
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
+
+	free_irq(ctx->irq, ctx);
+	iounmap(ctx->regs);
+	release_resource(ctx->regs_res);
+	kfree(ctx->regs_res);
+
+	clk_put(ctx->sclk_fimc_clk);
+	clk_put(ctx->fimc_clk);
+	clk_put(ctx->wb_clk);
+	clk_put(ctx->wb_b_clk);
+
+	kfree(ctx);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep: power down unless runtime PM already suspended us. */
+static int fimc_suspend(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	if (pm_runtime_suspended(dev))
+		return 0;
+	/* ToDo */
+	return fimc_power_on(ctx, false);
+}
+
+/* System resume: power up only if runtime PM considers us active. */
+static int fimc_resume(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	if (!pm_runtime_suspended(dev))
+		return fimc_power_on(ctx, true);
+	/* ToDo */
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM suspend: gate the FIMC clocks. */
+static int fimc_runtime_suspend(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	/* ToDo */
+	return fimc_power_on(ctx, false);
+}
+
+/* Runtime PM resume: ungate the FIMC clocks. */
+static int fimc_runtime_resume(struct device *dev)
+{
+	struct fimc_context *ctx = get_fimc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	/* ToDo */
+	return fimc_power_on(ctx, true);
+}
+#endif
+
+/* System-sleep and runtime PM hooks for the FIMC device. */
+static const struct dev_pm_ops fimc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
+	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
+};
+
+/* Platform driver glue; ToDo: need to check use case platform_device_id */
+struct platform_driver fimc_driver = {
+	.probe		= fimc_probe,
+	.remove		= __devexit_p(fimc_remove),
+	.driver		= {
+		.name	= "exynos-drm-fimc",
+		.owner	= THIS_MODULE,
+		.pm	= &fimc_pm_ops,
+	},
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 0000000..e631774
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FIMC_H_
+#define _EXYNOS_DRM_FIMC_H_
+
+/* ToDo */
+
+#endif /* _EXYNOS_DRM_FIMC_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
deleted file mode 100644
index 823bfb0..0000000
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ /dev/null
@@ -1,1543 +0,0 @@
-/* exynos_drm_fimd.c
- *
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * Authors:
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
-#include "drmP.h"
-#include "drm_backlight.h"
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <linux/cma.h>
-
-#include <drm/exynos_drm.h>
-#include <plat/regs-fb-v4.h>
-
-#include <plat/fimd_lite_ext.h>
-
-#include <mach/map.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_fbdev.h"
-#include "exynos_drm_crtc.h"
-
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
-#include <linux/devfreq/exynos4_display.h>
-#endif
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
-#include <plat/fimc.h>
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
-#include <plat/pd.h>
-#include <linux/pm_qos_params.h>
-#endif
-#define FIMD_GET_LCD_WIDTH _IOR('F', 302, int)
-#define FIMD_GET_LCD_HEIGHT _IOR('F', 303, int)
-#define FIMD_SET_WRITEBACK _IOW('F', 304, u32)
-#endif
-
-/*
- * FIMD is stand for Fully Interactive Mobile Display and
- * as a display controller, it transfers contents drawn on memory
- * to a LCD Panel through Display Interfaces such as RGB or
- * CPU Interface.
- */
-
-/* position control register for hardware window 0, 2 ~ 4.*/
-#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
-#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
-/* size control register for hardware window 0. */
-#define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08)
-/* alpha control register for hardware window 1 ~ 4. */
-#define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16)
-/* size control register for hardware window 1 ~ 4. */
-#define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16)
-
-#define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8)
-#define VIDWx_BUF_END(win, buf) (VIDW_BUF_END(buf) + (win) * 8)
-#define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4)
-
-/* color key control register for hardware window 1 ~ 4. */
-#define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8))
-/* color key value register for hardware window 1 ~ 4. */
-#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8))
-
-/* FIMD has totally five hardware windows. */
-#define WINDOWS_NR 5
-
-#define get_fimd_context(dev) platform_get_drvdata(to_platform_device(dev))
-
-static struct s5p_fimd_ext_device *fimd_lite_dev, *mdnie;
-static struct s5p_fimd_dynamic_refresh *fimd_refresh;
-
-struct fimd_notifier_block {
- struct list_head list;
- void *data;
- int (*client_notifier)(unsigned int val, void *data);
-};
-
-static LIST_HEAD(fimd_notifier_list);
-static DEFINE_MUTEX(fimd_notifier_lock);
-
-struct fimd_win_data {
- unsigned int offset_x;
- unsigned int offset_y;
- unsigned int ovl_width;
- unsigned int ovl_height;
- unsigned int fb_width;
- unsigned int fb_height;
- unsigned int bpp;
- dma_addr_t dma_addr;
- void __iomem *vaddr;
- unsigned int buf_offsize;
- unsigned int line_size; /* bytes */
- bool enabled;
-};
-
-struct fimd_context {
- struct exynos_drm_subdrv subdrv;
- int irq;
- struct drm_crtc *crtc;
- struct clk *bus_clk;
- struct clk *lcd_clk;
- struct resource *regs_res;
- void __iomem *regs;
- struct fimd_win_data win_data[WINDOWS_NR];
- unsigned int clkdiv;
- unsigned int default_win;
- unsigned long irq_flags;
- u32 vidcon0;
- u32 vidcon1;
- bool suspended;
- struct mutex lock;
-
- struct exynos_drm_panel_info *panel;
- unsigned int high_freq;
- unsigned int dynamic_refresh;
- struct notifier_block nb_exynos_display;
-
- struct work_struct work;
- bool errata;
-#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
- struct notifier_block nb_ctrl;
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
- struct pm_qos_request_list pm_qos;
-#endif
-#endif
-};
-
-static bool fimd_display_is_connected(struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /* TODO. */
-
- return true;
-}
-
-static void *fimd_get_panel(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return ctx->panel;
-}
-
-static int fimd_check_timing(struct device *dev, void *timing)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /* TODO. */
-
- return 0;
-}
-
-static int fimd_display_power_on(struct device *dev, int mode)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /* TODO */
-
- drm_bl_dpms(mode);
-
- return 0;
-}
-
-static struct exynos_drm_display_ops fimd_display_ops = {
- .type = EXYNOS_DISPLAY_TYPE_LCD,
- .is_connected = fimd_display_is_connected,
- .get_panel = fimd_get_panel,
- .check_timing = fimd_check_timing,
- .power_on = fimd_display_power_on,
-};
-
-static void exynos_drm_mdnie_mode_stop(struct fimd_context *ctx)
-{
- struct s5p_fimd_ext_driver *fimd_lite_drv;
- u32 cfg;
-
- fimd_lite_drv = to_fimd_ext_driver(fimd_lite_dev->dev.driver);
-
- /* set dualrgb register to mDNIe mode. */
- cfg = readl(ctx->regs + DUALRGB);
- cfg &= ~(0x3 << 0);
- writel(cfg, ctx->regs + DUALRGB);
- msleep(20);
-
- /* change display path. */
- cfg = readl(S3C_VA_SYS + 0x210);
- cfg |= 1 << 1;
- writel(cfg, S3C_VA_SYS + 0x210);
-
- if (fimd_lite_drv->stop)
- fimd_lite_drv->stop(fimd_lite_dev);
-
- if (fimd_lite_drv->setup)
- fimd_lite_drv->setup(fimd_lite_dev, 0);
-
- /* clock off */
- if (fimd_lite_drv->power_off)
- fimd_lite_drv->power_off(fimd_lite_dev);
-
- fimd_lite_dev->enabled = false;
-}
-
-static void exynos_drm_set_mdnie_mode(struct fimd_context *ctx)
-{
- u32 cfg;
-
- /* change display path. */
- cfg = readl(S3C_VA_SYS + 0x210);
- /* MIE_LBLK0 is mDNIe. */
- cfg |= 1 << 0;
- /* FIMDBYPASS_LBLK0 is MIE/mDNIe. */
- cfg &= ~(1 << 1);
- writel(cfg, S3C_VA_SYS + 0x210);
-
- /* all polarity values should be 0 for mDNIe. */
- cfg = readl(ctx->regs + VIDCON1);
- cfg &= ~(VIDCON1_INV_VCLK | VIDCON1_INV_HSYNC |
- VIDCON1_INV_VSYNC | VIDCON1_INV_VDEN |
- VIDCON1_VCLK_MASK);
-
- writel(cfg, ctx->regs + VIDCON1);
-
- /* set dualrgb register to mDNIe mode. */
- cfg = readl(ctx->regs + DUALRGB);
- cfg &= ~(0x3 << 0);
- cfg |= 0x3 << 0;
- writel(cfg, ctx->regs + DUALRGB);
-}
-
-static int exynos_drm_change_to_mdnie(struct fimd_context *ctx)
-{
- u32 cfg;
- struct s5p_fimd_ext_driver *mdnie_drv, *fimd_lite_drv;
-
- mdnie_drv = to_fimd_ext_driver(mdnie->dev.driver);
- fimd_lite_drv = to_fimd_ext_driver(fimd_lite_dev->dev.driver);
-
- /**
- * path change sequence for mDNIe.
- *
- * 1. FIMD-LITE DMA stop.
- * 2. FIMD DMA stop.
- * 3. change DISPLAY_CONTROL and DUALRGB registers to mDNIe mode.
- * 4. change FIMD VCLKFREE to freerun mode.
- * 5. initialize mDNIe module.
- * 6. initialize FIMD-LITE module.
- * 7. FIMD-LITE logic start.
- * 8. FIMD-LITE DMA start.
- * 9. FIMD DMA start.
- *
- * ps. FIMD polarity values should be 0.
- * lcd polarity values should be set to FIMD-LITE.
- * FIMD and FIMD-LITE DMA should be started at same time.
- */
- /* set fimd to mDNIe mode.(WB/mDNIe) */
- exynos_drm_set_mdnie_mode(ctx);
-
- /* enable FIMD-LITE. clk */
- if (fimd_lite_drv && fimd_lite_drv->power_on)
- fimd_lite_drv->power_on(fimd_lite_dev);
-
- /* setup mDNIe. */
- if (mdnie_drv)
- mdnie_drv->setup(mdnie, 1);
-
- /* setup FIMD-LITE. */
- if (fimd_lite_drv)
- fimd_lite_drv->setup(fimd_lite_dev, 1);
-
- cfg = readl(ctx->regs + VIDCON0);
- cfg |= VIDCON0_ENVID | VIDCON0_ENVID_F;
- writel(cfg, ctx->regs + VIDCON0);
-
- if (fimd_lite_drv->start)
- fimd_lite_drv->start(fimd_lite_dev);
- return 0;
-}
-
-static void fimd_dpms(struct device *subdrv_dev, int mode)
-{
- struct fimd_context *ctx = get_fimd_context(subdrv_dev);
-
- DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
-
- mutex_lock(&ctx->lock);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- /*
- * enable fimd hardware only if suspended status.
- *
- * P.S. fimd_dpms function would be called at booting time so
- * clk_enable could be called double time.
- */
- if (ctx->suspended)
- pm_runtime_get_sync(subdrv_dev);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- if (!ctx->suspended)
- pm_runtime_put_sync(subdrv_dev);
- break;
- default:
- DRM_DEBUG_KMS("unspecified mode %d\n", mode);
- break;
- }
-
- mutex_unlock(&ctx->lock);
-}
-
-static void fimd_apply(struct device *subdrv_dev)
-{
- struct fimd_context *ctx = get_fimd_context(subdrv_dev);
- struct exynos_drm_manager *mgr = ctx->subdrv.manager;
- struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
- struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
- struct fimd_win_data *win_data;
- int i;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- for (i = 0; i < WINDOWS_NR; i++) {
- win_data = &ctx->win_data[i];
- if (win_data->enabled && (ovl_ops && ovl_ops->commit))
- ovl_ops->commit(subdrv_dev, i);
- }
-
- if (mgr_ops && mgr_ops->commit)
- mgr_ops->commit(subdrv_dev);
-}
-
-static void fimd_commit(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
- u32 val;
-
- if (ctx->suspended)
- return;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /* setup polarity values from machine code. */
- writel(ctx->vidcon1, ctx->regs + VIDCON1);
-
- /* setup vertical timing values. */
- val = VIDTCON0_VBPD(timing->upper_margin - 1) |
- VIDTCON0_VFPD(timing->lower_margin - 1) |
- VIDTCON0_VSPW(timing->vsync_len - 1);
- writel(val, ctx->regs + VIDTCON0);
-
- /* setup horizontal timing values. */
- val = VIDTCON1_HBPD(timing->left_margin - 1) |
- VIDTCON1_HFPD(timing->right_margin - 1) |
- VIDTCON1_HSPW(timing->hsync_len - 1);
- writel(val, ctx->regs + VIDTCON1);
-
- /* setup horizontal and vertical display size. */
- val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1);
- writel(val, ctx->regs + VIDTCON2);
-
- /* setup clock source, clock divider, enable dma. */
- val = ctx->vidcon0;
- val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
-
- if (ctx->clkdiv > 1)
- val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
- else
- val &= ~VIDCON0_CLKDIR; /* 1:1 clock */
-
- /*
- * fields of register with prefix '_F' would be updated
- * at vsync(same as dma start)
- */
- val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
- writel(val, ctx->regs + VIDCON0);
-
- /*
- * fix fimd errata with mDNIe.
- *
- * this code fixes a issue that mDNIe unfunctions properly
- * when fimd power off goes to on. this issue is because dma
- * is enabled two times with setcrtc call once a process is
- * ternimated(at this thime, fimd goes to on from off for back
- * to console fb) so this condition would avoid the situation.
- */
- if (!ctx->errata) {
- /*
- * Workaround: After power domain is turned off then
- * when it is turned on, this needs.
- */
- val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
- writel(val, ctx->regs + VIDCON0);
-
- val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
- writel(val, ctx->regs + VIDCON0);
-
- ctx->errata = true;
- }
-}
-
-static int fimd_enable_vblank(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- u32 val;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (ctx->suspended)
- return -EPERM;
-
- if (!test_and_set_bit(0, &ctx->irq_flags)) {
- val = readl(ctx->regs + VIDINTCON0);
-
- val |= VIDINTCON0_INT_ENABLE;
- val |= VIDINTCON0_INT_FRAME;
-
- val &= ~VIDINTCON0_FRAMESEL0_MASK;
- val |= VIDINTCON0_FRAMESEL0_VSYNC;
- val &= ~VIDINTCON0_FRAMESEL1_MASK;
- val |= VIDINTCON0_FRAMESEL1_NONE;
-
- writel(val, ctx->regs + VIDINTCON0);
- }
-
- return 0;
-}
-
-static void fimd_disable_vblank(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- u32 val;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (ctx->suspended)
- return;
-
- if (test_and_clear_bit(0, &ctx->irq_flags)) {
- val = readl(ctx->regs + VIDINTCON0);
-
- val &= ~VIDINTCON0_INT_FRAME;
- val &= ~VIDINTCON0_INT_ENABLE;
-
- writel(val, ctx->regs + VIDINTCON0);
- }
-}
-
-static struct exynos_drm_manager_ops fimd_manager_ops = {
- .dpms = fimd_dpms,
- .apply = fimd_apply,
- .commit = fimd_commit,
- .enable_vblank = fimd_enable_vblank,
- .disable_vblank = fimd_disable_vblank,
-};
-
-static void fimd_win_mode_set(struct device *dev,
- struct exynos_drm_overlay *overlay)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data;
- int win;
- unsigned long offset;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (!overlay) {
- dev_err(dev, "overlay is NULL\n");
- return;
- }
-
- win = overlay->zpos;
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win > WINDOWS_NR)
- return;
-
- offset = overlay->fb_x * (overlay->bpp >> 3);
- offset += overlay->fb_y * overlay->pitch;
-
- DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
-
- win_data = &ctx->win_data[win];
-
- win_data->offset_x = overlay->crtc_x;
- win_data->offset_y = overlay->crtc_y;
- win_data->ovl_width = overlay->crtc_width;
- win_data->ovl_height = overlay->crtc_height;
- win_data->fb_width = overlay->fb_width;
- win_data->fb_height = overlay->fb_height;
- win_data->dma_addr = overlay->dma_addr[0] + offset;
- win_data->vaddr = overlay->vaddr[0] + offset;
- win_data->bpp = overlay->bpp;
- win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
- (overlay->bpp >> 3);
- win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
-
- DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
- win_data->offset_x, win_data->offset_y);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
- DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
- (unsigned long)win_data->dma_addr,
- (unsigned long)win_data->vaddr);
- DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
- overlay->fb_width, overlay->crtc_width);
-}
-
-static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data = &ctx->win_data[win];
- unsigned long val;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- val = WINCONx_ENWIN;
-
- switch (win_data->bpp) {
- case 1:
- val |= WINCON0_BPPMODE_1BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_4WORD;
- break;
- case 2:
- val |= WINCON0_BPPMODE_2BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_8WORD;
- break;
- case 4:
- val |= WINCON0_BPPMODE_4BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_8WORD;
- break;
- case 8:
- val |= WINCON0_BPPMODE_8BPP_PALETTE;
- val |= WINCONx_BURSTLEN_8WORD;
- val |= WINCONx_BYTSWP;
- break;
- case 16:
- val |= WINCON0_BPPMODE_16BPP_565;
- val |= WINCONx_HAWSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
- case 24:
- val |= WINCON0_BPPMODE_24BPP_888;
- val |= WINCONx_WSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
- case 32:
- val |= WINCON1_BPPMODE_28BPP_A4888
- | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
- val |= WINCONx_WSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
- default:
- DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
-
- val |= WINCON0_BPPMODE_24BPP_888;
- val |= WINCONx_WSWP;
- val |= WINCONx_BURSTLEN_16WORD;
- break;
- }
-
- DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
-
- writel(val, ctx->regs + WINCON(win));
-}
-
-static void fimd_win_set_colkey(struct device *dev, unsigned int win)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- unsigned int keycon0 = 0, keycon1 = 0;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
- WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
-
- keycon1 = WxKEYCON1_COLVAL(0xffffffff);
-
- writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
- writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
-}
-
-static void fimd_win_commit(struct device *dev, int zpos)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data;
- int win = zpos;
- unsigned long val, alpha, size;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (ctx->suspended)
- return;
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win > WINDOWS_NR)
- return;
-
- win_data = &ctx->win_data[win];
-
- /*
- * SHADOWCON register is used for enabling timing.
- *
- * for example, once only width value of a register is set,
- * if the dma is started then fimd hardware could malfunction so
- * with protect window setting, the register fields with prefix '_F'
- * wouldn't be updated at vsync also but updated once unprotect window
- * is set.
- */
-
- /* protect windows */
- val = readl(ctx->regs + SHADOWCON);
- val |= SHADOWCON_WINx_PROTECT(win);
- writel(val, ctx->regs + SHADOWCON);
-
- /* buffer start address */
- val = (unsigned long)win_data->dma_addr;
- writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
-
- /* buffer end address */
- size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
- val = (unsigned long)(win_data->dma_addr + size);
- writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
-
- DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)win_data->dma_addr, val, size);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- win_data->ovl_width, win_data->ovl_height);
-
- /* buffer size */
- val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
- VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
- writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
-
- /* OSD position */
- val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
- VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
- writel(val, ctx->regs + VIDOSD_A(win));
-
- val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
- win_data->ovl_width - 1) |
- VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
- win_data->ovl_height - 1);
- writel(val, ctx->regs + VIDOSD_B(win));
-
- DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- win_data->offset_x, win_data->offset_y,
- win_data->offset_x + win_data->ovl_width - 1,
- win_data->offset_y + win_data->ovl_height - 1);
-
- /* hardware window 0 doesn't support alpha channel. */
- if (win != 0) {
- /* OSD alpha */
- alpha = VIDISD14C_ALPHA1_R(0xf) |
- VIDISD14C_ALPHA1_G(0xf) |
- VIDISD14C_ALPHA1_B(0xf);
-
- writel(alpha, ctx->regs + VIDOSD_C(win));
- }
-
- /* OSD size */
- if (win != 3 && win != 4) {
- u32 offset = VIDOSD_D(win);
- if (win == 0)
- offset = VIDOSD_C_SIZE_W0;
- val = win_data->ovl_width * win_data->ovl_height;
- writel(val, ctx->regs + offset);
-
- DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
- }
-
- fimd_win_set_pixfmt(dev, win);
-
- /* hardware window 0 doesn't support color key. */
- if (win != 0)
- fimd_win_set_colkey(dev, win);
-
- /* wincon */
- val = readl(ctx->regs + WINCON(win));
- val |= WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
-
- /* Enable DMA channel and unprotect windows */
- val = readl(ctx->regs + SHADOWCON);
- val |= SHADOWCON_CHx_ENABLE(win);
- val &= ~SHADOWCON_WINx_PROTECT(win);
- writel(val, ctx->regs + SHADOWCON);
-
- win_data->enabled = true;
-}
-
-static void fimd_win_disable(struct device *dev, int zpos)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct fimd_win_data *win_data;
- int win = zpos;
- u32 val;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (win == DEFAULT_ZPOS)
- win = ctx->default_win;
-
- if (win < 0 || win > WINDOWS_NR)
- return;
-
- win_data = &ctx->win_data[win];
-
- /* protect windows */
- val = readl(ctx->regs + SHADOWCON);
- val |= SHADOWCON_WINx_PROTECT(win);
- writel(val, ctx->regs + SHADOWCON);
-
- /* wincon */
- val = readl(ctx->regs + WINCON(win));
- val &= ~WINCONx_ENWIN;
- writel(val, ctx->regs + WINCON(win));
-
- /* unprotect windows */
- val = readl(ctx->regs + SHADOWCON);
- val &= ~SHADOWCON_CHx_ENABLE(win);
- val &= ~SHADOWCON_WINx_PROTECT(win);
- writel(val, ctx->regs + SHADOWCON);
-
- win_data->enabled = false;
-}
-
-static struct exynos_drm_overlay_ops fimd_overlay_ops = {
- .mode_set = fimd_win_mode_set,
- .commit = fimd_win_commit,
- .disable = fimd_win_disable,
-};
-
-static struct exynos_drm_manager fimd_manager = {
- .pipe = -1,
- .ops = &fimd_manager_ops,
- .overlay_ops = &fimd_overlay_ops,
- .display_ops = &fimd_display_ops,
-};
-
-static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
-{
- struct exynos_drm_private *dev_priv = drm_dev->dev_private;
- struct drm_pending_vblank_event *e, *t;
- struct timeval now;
- unsigned long flags;
- bool is_checked = false;
-
- spin_lock_irqsave(&drm_dev->event_lock, flags);
-
- list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
- base.link) {
- /* if event's pipe isn't same as crtc then ignore it. */
- if (crtc != e->pipe)
- continue;
-
- is_checked = true;
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
-
- list_move_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
- }
-
- if (is_checked) {
- /*
- * call drm_vblank_put only in case that drm_vblank_get was
- * called.
- */
- if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
- drm_vblank_put(drm_dev, crtc);
-
- /*
- * don't off vblank if vblank_disable_allowed is 1,
- * because vblank would be off by timer handler.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, crtc);
- }
-
- spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-}
-
-static void exynos_fimd_schedule_work(struct work_struct *work)
-{
- struct fimd_context *ctx = container_of(work, struct fimd_context,
- work);
- u32 ret;
-
- /* Change mdnie mode after irq handler */
- if (mdnie && fimd_lite_dev) {
- if (!fimd_lite_dev->enabled) {
- while (1) {
- ret = (__raw_readl(ctx->regs + VIDCON1)) &
- VIDCON1_VSTATUS_MASK;
- if (ret == VIDCON1_VSTATUS_BACKPORCH) {
- exynos_drm_change_to_mdnie(ctx);
- fimd_lite_dev->enabled = true;
- break;
- }
- }
- }
- }
-
-}
-
-static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
-{
- struct fimd_context *ctx = (struct fimd_context *)dev_id;
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct drm_device *drm_dev = subdrv->drm_dev;
- struct exynos_drm_manager *manager = subdrv->manager;
- u32 val;
-
- val = readl(ctx->regs + VIDINTCON1);
-
- if (val & VIDINTCON1_INT_FRAME)
- /* VSYNC interrupt */
- writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
-
- /* check the crtc is detached already from encoder */
- if (manager->pipe < 0)
- goto out;
-
- drm_handle_vblank(drm_dev, manager->pipe);
- fimd_finish_pageflip(drm_dev, manager->pipe);
-
- schedule_work(&ctx->work);
-
-out:
- return IRQ_HANDLED;
-}
-
-static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * enable drm irq mode.
- * - with irq_enabled = 1, we can use the vblank feature.
- *
- * P.S. note that we wouldn't use drm irq handler but
- * just specific driver own one instead because
- * drm framework supports only one irq handler.
- */
- drm_dev->irq_enabled = 1;
-
- /*
- * with vblank_disable_allowed = 1, vblank interrupt will be disabled
- * by drm timer once a current process gives up ownership of
- * vblank event.(after drm_vblank_put function is called)
- */
- drm_dev->vblank_disable_allowed = 1;
-
- return 0;
-}
-
-static void fimd_subdrv_remove(struct drm_device *drm_dev)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /* TODO. */
-}
-
-static int fimd_calc_clkdiv(struct fimd_context *ctx,
- struct fb_videomode *timing)
-{
- unsigned long clk = clk_get_rate(ctx->lcd_clk);
- u32 retrace;
- u32 clkdiv;
- u32 best_framerate = 0;
- u32 framerate;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- retrace = timing->left_margin + timing->hsync_len +
- timing->right_margin + timing->xres;
- retrace *= timing->upper_margin + timing->vsync_len +
- timing->lower_margin + timing->yres;
-
- /* default framerate is 60Hz */
- if (!timing->refresh)
- timing->refresh = 60;
-
- clk /= retrace;
-
- for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
- int tmp;
-
- /* get best framerate */
- framerate = clk / clkdiv;
- tmp = timing->refresh - framerate;
- if (tmp < 0) {
- best_framerate = framerate;
- continue;
- } else {
- if (!best_framerate)
- best_framerate = framerate;
- else if (tmp < (best_framerate - framerate))
- best_framerate = framerate;
- break;
- }
- }
-
- return clkdiv;
-}
-
-static void fimd_clear_win(struct fimd_context *ctx, int win)
-{
- u32 val;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- writel(0, ctx->regs + WINCON(win));
- writel(0, ctx->regs + VIDOSD_A(win));
- writel(0, ctx->regs + VIDOSD_B(win));
- writel(0, ctx->regs + VIDOSD_C(win));
-
- if (win == 1 || win == 2)
- writel(0, ctx->regs + VIDOSD_D(win));
-
- val = readl(ctx->regs + SHADOWCON);
- val &= ~SHADOWCON_WINx_PROTECT(win);
- writel(val, ctx->regs + SHADOWCON);
-}
-
-int fimd_register_client(int (*client_notifier)(unsigned int val, void *data),
- void *data)
-{
- struct fimd_notifier_block *fimd_block;
-
- fimd_block = kzalloc(sizeof(*fimd_block), GFP_KERNEL);
- if (!fimd_block) {
- printk(KERN_ERR "failed to allocate fimd_notifier_block\n");
- return -ENOMEM;
- }
-
- fimd_block->client_notifier = client_notifier;
- fimd_block->data = data;
-
- mutex_lock(&fimd_notifier_lock);
- list_add_tail(&fimd_block->list, &fimd_notifier_list);
- mutex_unlock(&fimd_notifier_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(fimd_register_client);
-
-void fimd_unregister_client(int (*client_notifier)(unsigned int val,
- void *data))
-{
- struct fimd_notifier_block *fimd_block;
-
- mutex_lock(&fimd_notifier_lock);
- list_for_each_entry(fimd_block, &fimd_notifier_list, list) {
- if (!fimd_block)
- continue;
-
- if (fimd_block->client_notifier == client_notifier) {
- list_del(&fimd_block->list);
- kfree(fimd_block);
- fimd_block = NULL;
- break;
- }
- }
- mutex_unlock(&fimd_notifier_lock);
-}
-EXPORT_SYMBOL(fimd_unregister_client);
-
-static int fimd_notifier_call_chain(void)
-{
- struct fimd_notifier_block *fimd_block;
-
- mutex_lock(&fimd_notifier_lock);
- list_for_each_entry(fimd_block, &fimd_notifier_list, list) {
- if (fimd_block && fimd_block->client_notifier)
- fimd_block->client_notifier(0, fimd_block->data);
- }
- mutex_unlock(&fimd_notifier_lock);
-
- return 0;
-}
-
-static int fimd_power_on(struct fimd_context *ctx, bool enable)
-{
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct device *dev = subdrv->dev;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (enable != false && enable != true)
- return -EINVAL;
-
- if (enable) {
- int ret;
-
- /* fimd power should be off to clear mipi-dsi fifo. */
- fimd_notifier_call_chain();
-
- ret = clk_enable(ctx->bus_clk);
- if (ret < 0)
- return ret;
-
- ret = clk_enable(ctx->lcd_clk);
- if (ret < 0) {
- clk_disable(ctx->bus_clk);
- return ret;
- }
-
- ctx->suspended = false;
-
- /* if vblank was enabled status, enable it again. */
- if (test_and_clear_bit(0, &ctx->irq_flags))
- fimd_enable_vblank(dev);
-
- fimd_apply(dev);
- } else {
- if (fimd_lite_dev)
- exynos_drm_mdnie_mode_stop(ctx);
-
- clk_disable(ctx->lcd_clk);
- clk_disable(ctx->bus_clk);
-
- ctx->suspended = true;
- ctx->errata = false;
- }
-
- return 0;
-}
-
-static void exynos_drm_change_clock(struct fimd_context *ctx)
-{
- unsigned int cfg = 0;
- struct s5p_fimd_ext_driver *fimd_lite_drv;
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
-
- fimd_lite_drv = to_fimd_ext_driver(fimd_lite_dev->dev.driver);
-
- if (!ctx->dynamic_refresh) {
- timing->refresh = 60;
- ctx->clkdiv = fimd_calc_clkdiv(ctx, timing);
-#ifdef CONFIG_LCD_S6E8AA0
- /* workaround: To apply dynamic refresh rate */
- s6e8aa0_panel_cond(1);
-#endif
- if (fimd_lite_dev && fimd_lite_dev->enabled) {
- fimd_refresh->clkdiv = ctx->clkdiv;
- fimd_lite_drv->change_clock(fimd_refresh,
- fimd_lite_dev);
- } else {
- cfg = readl(ctx->regs + VIDCON0);
- cfg &= ~VIDCON0_CLKVAL_F(0xFF);
- cfg |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1);
- writel(cfg, ctx->regs + VIDCON0);
- }
- } else {
- ctx->clkdiv = fimd_calc_clkdiv(ctx, timing);
-#ifdef CONFIG_LCD_S6E8AA0
- /* workaround: To apply dynamic refresh rate */
- s6e8aa0_panel_cond(ctx->high_freq);
-#endif
- if (fimd_lite_dev && fimd_lite_dev->enabled) {
- fimd_refresh->clkdiv = ctx->clkdiv;
- fimd_lite_drv->change_clock(fimd_refresh,
- fimd_lite_dev);
- } else {
- cfg = readl(ctx->regs + VIDCON0);
- cfg &= ~VIDCON0_CLKVAL_F(0xFF);
- cfg |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1);
- writel(cfg, ctx->regs + VIDCON0);
- }
- }
-}
-
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
-static int exynos_display_notifier_callback(struct notifier_block *this,
- unsigned long event, void *_data)
-{
- struct fimd_context *ctx
- = container_of(this, struct fimd_context, nb_exynos_display);
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
-
- if (ctx->suspended)
- return NOTIFY_DONE;
-
- switch (event) {
- case EXYNOS4_DISPLAY_LV_HF:
- timing->refresh = EXYNOS4_DISPLAY_LV_HF;
- ctx->high_freq = 1;
- break;
- case EXYNOS4_DISPLAY_LV_LF:
- timing->refresh = EXYNOS4_DISPLAY_LV_LF;
- ctx->high_freq = 0;
- break;
- default:
- return NOTIFY_BAD;
- }
-
- exynos_drm_change_clock(ctx);
-
- return NOTIFY_DONE;
-}
-#endif
-
-static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
- unsigned long refresh;
- int ret;
-
- if (ctx->dynamic_refresh) {
- ret = kstrtoul(buf, 0, &refresh);
- timing->refresh = refresh;
- if (refresh == 60)
- ctx->high_freq = 1;
- else
- ctx->high_freq = 0;
-
- exynos_drm_change_clock(ctx);
- }
-
- return count;
-}
-
-static ssize_t show_refresh(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", timing->refresh);
-}
-
-static struct device_attribute device_attrs[] = {
- __ATTR(refresh, S_IRUGO|S_IWUSR, show_refresh, store_refresh),
-};
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
-static void fimd_set_writeback(struct fimd_context *ctx, int enable)
-{
- u32 vidcon0 = readl(ctx->regs + VIDCON0);
- u32 vidcon2 = readl(ctx->regs + VIDCON2);
-
- vidcon0 &= ~VIDCON0_VIDOUT_MASK;
- vidcon2 &= ~(VIDCON2_WB_MASK |
- VIDCON2_WB_SKIP_MASK |
- VIDCON2_TVFORMATSEL_HW_SW_MASK |
- VIDCON2_TVFORMATSEL_MASK);
-
- if (enable) {
- vidcon0 |= VIDCON0_VIDOUT_WB;
- vidcon2 |= (VIDCON2_WB_ENABLE |
- VIDCON2_TVFORMATSEL_SW |
- VIDCON2_TVFORMATSEL_YUV444);
- } else {
- vidcon0 |= VIDCON0_VIDOUT_RGB;
- vidcon2 |= VIDCON2_WB_DISABLE;
- }
-
- writel(vidcon0, ctx->regs + VIDCON0);
- writel(vidcon2, ctx->regs + VIDCON2);
-}
-
-static int fimd_notifier_ctrl(struct notifier_block *this,
- unsigned long event, void *_data)
-{
- struct fimd_context *ctx = container_of(this,
- struct fimd_context, nb_ctrl);
-
- switch (event) {
- case FIMD_GET_LCD_WIDTH: {
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
- int *width = (int *)_data;
-
- *width = timing->xres;
- }
- break;
- case FIMD_GET_LCD_HEIGHT: {
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
- int *height = (int *)_data;
-
- *height = timing->yres;
- }
- break;
- case FIMD_SET_WRITEBACK: {
- unsigned int refresh;
- int *enable = (int *)&_data;
-
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
- if (*enable)
- refresh = EXYNOS4_DISPLAY_LV_HF;
- else
- refresh = EXYNOS4_DISPLAY_LV_LF;
- pm_qos_update_request(&ctx->pm_qos,
- refresh);
-#endif
- fimd_set_writeback(ctx, *enable);
- }
- break;
- default:
- /* ToDo : for checking use case */
- DRM_INFO("%s:event[0x%x]\n", __func__, (unsigned int)event);
- break;
- }
-
- return NOTIFY_DONE;
-}
-#endif
-
-static int __devinit fimd_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct fimd_context *ctx;
- struct exynos_drm_subdrv *subdrv;
- struct exynos_drm_fimd_pdata *pdata;
- struct exynos_drm_panel_info *panel;
- struct resource *res;
- int win;
- int i;
- int ret = -EINVAL;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(dev, "no platform data specified\n");
- return -EINVAL;
- }
-
- panel = &pdata->panel;
- if (!panel) {
- dev_err(dev, "panel is null.\n");
- return -EINVAL;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- ctx->bus_clk = clk_get(dev, "lcd");
- if (IS_ERR(ctx->bus_clk)) {
- dev_err(dev, "failed to get bus clock\n");
- ret = PTR_ERR(ctx->bus_clk);
- goto err_clk_get;
- }
-
- clk_enable(ctx->bus_clk);
-
- ctx->lcd_clk = clk_get(dev, "sclk_fimd");
- if (IS_ERR(ctx->lcd_clk)) {
- dev_err(dev, "failed to get lcd clock\n");
- ret = PTR_ERR(ctx->lcd_clk);
- goto err_bus_clk;
- }
-
- clk_enable(ctx->lcd_clk);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to find registers\n");
- ret = -ENOENT;
- goto err_clk;
- }
-
- ctx->regs_res = request_mem_region(res->start, resource_size(res),
- dev_name(dev));
- if (!ctx->regs_res) {
- dev_err(dev, "failed to claim register region\n");
- ret = -ENOENT;
- goto err_clk;
- }
-
- ctx->regs = ioremap(res->start, resource_size(res));
- if (!ctx->regs) {
- dev_err(dev, "failed to map registers\n");
- ret = -ENXIO;
- goto err_req_region_io;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "irq request failed.\n");
- goto err_get_resource;
- }
-
- ctx->irq = res->start;
-
- ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
- if (ret < 0) {
- dev_err(dev, "irq request failed.\n");
- goto err_get_resource;
- }
-
- ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
- ctx->vidcon0 = pdata->vidcon0;
- ctx->vidcon1 = pdata->vidcon1;
- ctx->default_win = pdata->default_win;
- ctx->dynamic_refresh = pdata->dynamic_refresh;
- ctx->panel = panel;
-
- INIT_WORK(&ctx->work, exynos_fimd_schedule_work);
-
- panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
-
- DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
- panel->timing.pixclock, ctx->clkdiv);
-
- /* mdnie support. */
- mdnie = s5p_fimd_ext_find_device("mdnie");
- fimd_lite_dev = s5p_fimd_ext_find_device("fimd_lite");
- if (mdnie && fimd_lite_dev) {
- fimd_refresh = kzalloc(sizeof(*fimd_refresh), GFP_KERNEL);
- if (!fimd_refresh) {
- dev_err(dev, "failed to allocate fimd_refresh.\n");
- ret = -ENOMEM;
- goto err_alloc_fail;
- }
-
- fimd_refresh->dynamic_refresh = pdata->dynamic_refresh;
- fimd_refresh->regs = ctx->regs;
- fimd_refresh->clkdiv = ctx->clkdiv;
- }
-
- for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
- ret = device_create_file(&(pdev->dev),
- &device_attrs[i]);
- if (ret)
- break;
- }
-
- if (ret < 0)
- dev_err(&pdev->dev, "failed to add sysfs entries\n");
-
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
- ctx->nb_exynos_display.notifier_call = exynos_display_notifier_callback;
- ret = exynos4_display_register_client(&ctx->nb_exynos_display);
- if (ret < 0)
- dev_warn(dev, "failed to register exynos-display notifier\n");
-#endif
-
- dev_info(&pdev->dev, "registered successfully\n");
-
- subdrv = &ctx->subdrv;
-
- subdrv->dev = dev;
- subdrv->manager = &fimd_manager;
- subdrv->probe = fimd_subdrv_probe;
- subdrv->remove = fimd_subdrv_remove;
-
-#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
- ctx->nb_ctrl.notifier_call = fimd_notifier_ctrl;
- ret = fimc_register_client(&ctx->nb_ctrl);
- if (ret) {
- dev_err(dev, "could not register fimd notify callback\n");
- goto err_alloc_fail;
- }
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
- pm_qos_add_request(&ctx->pm_qos,
- PM_QOS_DISPLAY_FREQUENCY, EXYNOS4_DISPLAY_LV_LF);
-#endif
-#endif
-
- mutex_init(&ctx->lock);
-
- platform_set_drvdata(pdev, ctx);
-
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
-
- for (win = 0; win < WINDOWS_NR; win++)
- if (win != ctx->default_win)
- fimd_clear_win(ctx, win);
-
- exynos_drm_subdrv_register(subdrv);
-
- return 0;
-
-err_alloc_fail:
- free_irq(ctx->irq, ctx);
-
-err_get_resource:
- iounmap(ctx->regs);
-
-err_req_region_io:
- release_resource(ctx->regs_res);
- kfree(ctx->regs_res);
-
-err_clk:
- clk_disable(ctx->lcd_clk);
- clk_put(ctx->lcd_clk);
-
-err_bus_clk:
- clk_disable(ctx->bus_clk);
- clk_put(ctx->bus_clk);
-
-err_clk_get:
- kfree(ctx);
- return ret;
-}
-
-static int __devexit fimd_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct fimd_context *ctx = platform_get_drvdata(pdev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- exynos_drm_subdrv_unregister(&ctx->subdrv);
-#ifdef CONFIG_DRM_EXYNOS_FIMD_WB
- fimc_unregister_client(&ctx->nb_ctrl);
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
- pm_qos_remove_request(&ctx->pm_qos);
-#endif
-#endif
-
- if (ctx->suspended)
- goto out;
-
- pm_runtime_set_suspended(dev);
- pm_runtime_put_sync(dev);
-
-out:
- pm_runtime_disable(dev);
-
- clk_put(ctx->lcd_clk);
- clk_put(ctx->bus_clk);
-
-
-#ifdef CONFIG_ARM_EXYNOS4_DISPLAY_DEVFREQ
- exynos4_display_unregister_client(&ctx->nb_exynos_display);
-#endif
-
- iounmap(ctx->regs);
- release_resource(ctx->regs_res);
- kfree(ctx->regs_res);
- free_irq(ctx->irq, ctx);
-
- kfree(ctx);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int fimd_suspend(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- /*
- * do not use pm_runtime_suspend(). if pm_runtime_suspend() is
- * called here, an error would be returned by that interface
- * because the usage_count of pm runtime is more than 1.
- */
- return fimd_power_on(ctx, false);
-}
-
-static int fimd_resume(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
-
- /*
- * if entered to sleep when lcd panel was on, the usage_count
- * of pm runtime would still be 1 so in this case, fimd driver
- * should be on directly not drawing on pm runtime interface.
- */
- if (!pm_runtime_suspended(dev))
- return fimd_power_on(ctx, true);
-
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PM_RUNTIME
-static int fimd_runtime_suspend(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return fimd_power_on(ctx, false);
-}
-
-static int fimd_runtime_resume(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- return fimd_power_on(ctx, true);
-}
-#endif
-
-static const struct dev_pm_ops fimd_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
- SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
-};
-
-struct platform_driver fimd_driver = {
- .probe = fimd_probe,
- .remove = __devexit_p(fimd_remove),
- .driver = {
- .name = "s3cfb",
- .owner = THIS_MODULE,
- .pm = &fimd_pm_ops,
- },
-};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index a8f201e..5633207 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -22,6 +22,7 @@
#include "drmP.h"
#include "exynos_drm.h"
#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
@@ -93,7 +94,7 @@
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
-#define MAX_BUF_ADDR_NR 6
+#define MAX_BUF_ADDR_NR 6
/* cmdlist data structure */
struct g2d_cmdlist {
@@ -111,7 +112,7 @@ struct g2d_cmdlist_node {
struct list_head list;
struct g2d_cmdlist *cmdlist;
unsigned int map_nr;
- void *gem_obj[MAX_BUF_ADDR_NR];
+ void *gem_objs[MAX_BUF_ADDR_NR];
dma_addr_t dma_addr;
struct drm_exynos_pending_g2d_event *event;
@@ -152,7 +153,8 @@ struct g2d_data {
struct kmem_cache *runqueue_slab;
};
-static int g2d_init_cmdlist(struct g2d_data *g2d)
+static int g2d_init_cmdlist(struct g2d_data *g2d,
+ struct exynos_drm_private *drm_priv)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
@@ -171,8 +173,9 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
* pages contained in sg list to iommu table. Command list pool also is
* accessed by dma through device address with using iommu.
*/
- g2d->cmdlist_pool_map = exynos_drm_iommu_map(dev, g2d->cmdlist_pool,
- G2D_CMDLIST_POOL_SIZE);
+ g2d->cmdlist_pool_map = exynos_drm_iommu_map(drm_priv->vmm,
+ g2d->cmdlist_pool,
+ G2D_CMDLIST_POOL_SIZE);
if (!g2d->cmdlist_pool_map) {
dev_err(dev, "failed map to iommu\n");
ret = -EFAULT;
@@ -199,7 +202,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err_iommu_unmap:
- exynos_drm_iommu_unmap(dev, g2d->cmdlist_pool_map);
+ exynos_drm_iommu_unmap(drm_priv->vmm, g2d->cmdlist_pool_map);
err:
dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
g2d->cmdlist_pool);
@@ -208,13 +211,18 @@ err:
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct device *dev = g2d->dev;
+ struct exynos_drm_private *drm_priv;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+
+ drm_priv = subdrv->drm_dev->dev_private;
- exynos_drm_iommu_unmap(dev, g2d->cmdlist_pool_map);
+ if (drm_priv->vmm)
+ exynos_drm_iommu_unmap(drm_priv->vmm, g2d->cmdlist_pool_map);
kfree(g2d->cmdlist_node);
- dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool);
+ dma_free_coherent(g2d->dev, G2D_CMDLIST_POOL_SIZE,
+ g2d->cmdlist_pool_virt,
+ g2d->cmdlist_pool);
}
static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -269,70 +277,48 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
struct drm_device *drm_dev,
struct drm_file *file)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist *cmdlist = node->cmdlist;
- struct iommu_gem_map_params params;
- unsigned int gem_handle;
- dma_addr_t addr;
int offset;
int i;
- params.dev = g2d->dev;
- params.drm_dev = drm_dev;
- params.file = file;
-
for (i = 0; i < node->map_nr; i++) {
+ unsigned int gem_handle, gem_obj;
+ dma_addr_t *addr;
+
offset = cmdlist->last - (i * 2 + 1);
gem_handle = cmdlist->data[offset];
- addr = exynos_drm_iommu_map_gem(&params,
- &g2d_priv->iommu_map_list,
- gem_handle,
- IOMMU_G2D);
- if (!addr) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_handle,
+ file,
+ &gem_obj);
+ if (IS_ERR(addr)) {
node->map_nr = i;
return -EFAULT;
}
- cmdlist->data[offset] = addr;
- node->gem_obj[i] = params.gem_obj;
+ cmdlist->data[offset] = *addr;
+ node->gem_objs[i] = (void *)gem_obj;
}
return 0;
}
-static void g2d_unmap_cmdlist_gem(struct drm_device *drm_dev,
- struct g2d_cmdlist_node *node,
- struct drm_file *file, int dec)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+ struct g2d_cmdlist_node *node)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev = g2d_priv->dev;
- struct g2d_cmdlist *cmdlist = node->cmdlist;
- struct iommu_gem_map_params params;
- struct g2d_data *g2d;
- dma_addr_t addr;
- int offset;
+ struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int i;
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return;
-
- params.dev = dev;
- params.drm_dev = drm_dev;
- params.file = file;
-
for (i = 0; i < node->map_nr; i++) {
- offset = cmdlist->last - (i * 2 + 1) - dec;
- addr = cmdlist->data[offset];
+ void *gem_obj = node->gem_objs[i];
- params.gem_obj = node->gem_obj[i];
+ if (gem_obj)
+ exynos_drm_gem_put_dma_addr(subdrv->drm_dev, gem_obj);
- exynos_drm_iommu_unmap_gem(&params, addr,
- IOMMU_G2D);
+ node->gem_objs[i] = NULL;
}
+
+ node->map_nr = 0;
}
static void g2d_dma_start(struct g2d_data *g2d,
@@ -344,7 +330,7 @@ static void g2d_dma_start(struct g2d_data *g2d,
pm_runtime_get_sync(g2d->dev);
clk_enable(g2d->gate_clk);
- pm_qos_update_request(&g2d->pm_qos, 400000);
+ pm_qos_update_request(&g2d->pm_qos, 400200);
/* interrupt enable */
writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
@@ -370,10 +356,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
static void g2d_free_runqueue_node(struct g2d_data *g2d,
struct g2d_runqueue_node *runqueue_node)
{
+ struct g2d_cmdlist_node *node;
+
if (!runqueue_node)
return;
mutex_lock(&g2d->cmdlist_mutex);
+ /*
+ * commands in run_cmdlist have been completed so unmap all gem
+ * objects in each command node so that they are unreferenced.
+ */
+ list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+ g2d_unmap_cmdlist_gem(g2d, node);
list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
mutex_unlock(&g2d->cmdlist_mutex);
@@ -398,7 +392,15 @@ static void g2d_runqueue_worker(struct work_struct *work)
clk_disable(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
- complete(&g2d->runqueue_node->complete);
+ /* if async mode, do not call complete. */
+ if (!g2d->runqueue_node->async)
+ complete(&g2d->runqueue_node->complete);
+
+ /*
+ * if async mode, run_cmdlist of runqueue_node is not freed
+ * at exynos_g2d_exec_ioctl once complete because wait_for_completion
+ * wasn't called there so free it here.
+ */
if (g2d->runqueue_node->async)
g2d_free_runqueue_node(g2d, g2d->runqueue_node);
@@ -444,12 +446,14 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
if (pending & G2D_INTP_GCMD_FIN) {
- u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
+ u32 value, list_done_count;
- cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
+ value = readl_relaxed(g2d->regs + G2D_DMA_STATUS);
+
+ list_done_count = (value & G2D_DMA_LIST_DONE_COUNT) >>
G2D_DMA_LIST_DONE_COUNT_OFFSET;
- g2d_finish_event(g2d, cmdlist_no);
+ g2d_finish_event(g2d, list_done_count);
writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
if (!(pending & G2D_INTP_ACMD_FIN)) {
@@ -461,6 +465,8 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
if (pending & G2D_INTP_ACMD_FIN)
queue_work(g2d->g2d_workq, &g2d->runqueue_work);
+ writel_relaxed(pending, g2d->regs + G2D_INTC_PEND);
+
return IRQ_HANDLED;
}
@@ -653,7 +659,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
return 0;
err_unmap:
- g2d_unmap_cmdlist_gem(drm_dev, node, file, 0);
+ g2d_unmap_cmdlist_gem(g2d, node);
err_free_event:
if (node->event) {
spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -724,11 +730,57 @@ out:
}
EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
-static int g2d_open(struct drm_device *drm_dev, struct device *dev,
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_private *drm_priv;
+ struct g2d_data *g2d;
+ int ret;
+
+ drm_priv = drm_dev->dev_private;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
+ /* allocate dma-aware cmdlist buffer and map it with iommu table. */
+ ret = g2d_init_cmdlist(g2d, drm_priv);
+ if (ret < 0)
+ return ret;
+
+ /* enable iommu to g2d hardware */
+ ret = exynos_drm_iommu_activate(drm_priv->vmm, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to activate iommu\n");
+ goto err_fini_cmdlist;
+ }
+
+ return ret;
+
+err_fini_cmdlist:
+ g2d_fini_cmdlist(g2d);
+ return ret;
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+ struct exynos_drm_private *drm_priv;
+
+ drm_priv = drm_dev->dev_private;
+
+ if (drm_priv->vmm)
+ exynos_drm_iommu_deactivate(drm_priv->vmm, dev);
+}
+
+static int g2d_subdrv_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv;
+ struct g2d_data *g2d;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
if (!g2d_priv) {
@@ -741,20 +793,17 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->iommu_map_list);
return 0;
}
-static void g2d_close(struct drm_device *drm_dev, struct device *dev,
+static void g2d_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_data *g2d;
struct g2d_cmdlist_node *node, *n;
- struct iommu_info_node *im, *t_im;
- struct iommu_gem_map_params params;
if (!dev)
return;
@@ -764,27 +813,18 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
return;
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+ list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ /*
+ * unmap all gem objects not completed.
+ *
+ * P.S. if current process was terminated forcely then
+ * there may be some commands in inuse_cmdlist so unmap
+ * them.
+ */
+ g2d_unmap_cmdlist_gem(g2d, node);
list_move_tail(&node->list, &g2d->free_cmdlist);
- mutex_unlock(&g2d->cmdlist_mutex);
-
- params.dev = dev;
- params.drm_dev = drm_dev;
- params.file = file;
-
- /*
- * unmap all device address spaces from iommu table and
- * release all lists.
- */
- list_for_each_entry_safe(im, t_im, &g2d_priv->iommu_map_list, list) {
- params.gem_obj = im->gem_obj;
-
- exynos_drm_iommu_unmap_gem(&params, im->dma_addr,
- IOMMU_G2D);
- list_del(&im->list);
- kfree(im);
- im = NULL;
}
+ mutex_unlock(&g2d->cmdlist_mutex);
kfree(file_priv->g2d_priv);
}
@@ -826,29 +866,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
mutex_init(&g2d->cmdlist_mutex);
mutex_init(&g2d->runqueue_mutex);
- /* setup device address space for g2d iommu */
- ret = exynos_drm_iommu_setup(dev);
- if (ret < 0) {
- dev_err(dev, "failed to setup iommu\n");
- goto err_destroy_workqueue;
- }
-
- /* enable iommu to g2d hardware */
- ret = exynos_drm_iommu_activate(dev);
- if (ret < 0) {
- dev_err(dev, "failed to activate iommu\n");
- goto err_iommu_cleanup;
- }
-
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0)
- goto err_iommu_deactivate;
-
g2d->gate_clk = clk_get(dev, "fimg2d");
if (IS_ERR(g2d->gate_clk)) {
dev_err(dev, "failed to get gate clock\n");
ret = PTR_ERR(g2d->gate_clk);
- goto err_fini_cmdlist;
+ goto err_destory_workqueue;
}
pm_runtime_enable(dev);
@@ -892,8 +914,10 @@ static int __devinit g2d_probe(struct platform_device *pdev)
subdrv = &g2d->subdrv;
subdrv->dev = dev;
- subdrv->open = g2d_open;
- subdrv->close = g2d_close;
+ subdrv->probe = g2d_subdrv_probe;
+ subdrv->remove = g2d_subdrv_remove;
+ subdrv->open = g2d_subdrv_open;
+ subdrv->close = g2d_subdrv_close;
ret = exynos_drm_subdrv_register(subdrv);
if (ret < 0) {
@@ -918,13 +942,7 @@ err_release_res:
err_put_clk:
pm_runtime_disable(dev);
clk_put(g2d->gate_clk);
-err_fini_cmdlist:
- g2d_fini_cmdlist(g2d);
-err_iommu_deactivate:
- exynos_drm_iommu_deactivate(dev);
-err_iommu_cleanup:
- exynos_drm_iommu_cleanup(dev);
-err_destroy_workqueue:
+err_destory_workqueue:
destroy_workqueue(g2d->g2d_workq);
err_destroy_slab:
kmem_cache_destroy(g2d->runqueue_slab);
@@ -955,8 +973,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
clk_put(g2d->gate_clk);
g2d_fini_cmdlist(g2d);
- exynos_drm_iommu_deactivate(&pdev->dev);
- exynos_drm_iommu_cleanup(&pdev->dev);
destroy_workqueue(g2d->g2d_workq);
kmem_cache_destroy(g2d->runqueue_slab);
kfree(g2d);
@@ -968,6 +984,8 @@ static int __devexit g2d_remove(struct platform_device *pdev)
static int g2d_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct exynos_drm_private *drm_priv = drm_dev->dev_private;
mutex_lock(&g2d->runqueue_mutex);
g2d->suspended = true;
@@ -979,12 +997,25 @@ static int g2d_suspend(struct device *dev)
flush_work_sync(&g2d->runqueue_work);
+ /* disable iommu to g2d device. */
+ exynos_drm_iommu_deactivate(drm_priv->vmm, dev);
+
return 0;
}
static int g2d_resume(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct exynos_drm_private *drm_priv = drm_dev->dev_private;
+ int ret;
+
+ /* enable iommu to g2d hardware */
+ ret = exynos_drm_iommu_activate(drm_priv->vmm, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to activate iommu\n");
+ return ret;
+ }
g2d->suspended = false;
g2d_exec_runqueue(g2d);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 35d2cd9..7d12f6c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -35,6 +35,8 @@
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"
+#define USERPTR_MAX_SIZE SZ_64M
+
static struct exynos_drm_private_cb *private_cb;
void exynos_drm_priv_cb_register(struct exynos_drm_private_cb *cb)
@@ -132,6 +134,45 @@ static void put_vma(struct vm_area_struct *vma)
kfree(vma);
}
+/*
+ * lock_userptr_vma - lock VMAs within user address space
+ *
+ * this function locks vma within user address space to avoid pages
+ * to the userspace from being swapped out.
+ * if this vma isn't locked, the pages to the userspace could be swapped out
+ * so unprivileged user might access different pages and dma of any device
+ * could access physical memory region not intended once swap-in.
+ */
+static int lock_userptr_vma(struct exynos_drm_gem_buf *buf, unsigned int lock)
+{
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+
+ start = buf->userptr;
+ end = buf->userptr + buf->size - 1;
+
+ down_write(&current->mm->mmap_sem);
+
+ do {
+ vma = find_vma(current->mm, start);
+ if (!vma) {
+ up_write(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+
+ if (lock)
+ vma->vm_flags |= VM_LOCKED;
+ else
+ vma->vm_flags &= ~VM_LOCKED;
+
+ start = vma->vm_end + 1;
+ } while (vma->vm_end < end);
+
+ up_write(&current->mm->mmap_sem);
+
+ return 0;
+}
+
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
struct vm_area_struct *vma)
{
@@ -165,25 +206,17 @@ out:
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
gfp_t gfpmask)
{
- struct inode *inode;
- struct address_space *mapping;
struct page *p, **pages;
int i, npages;
- /* This is the shared memory object that backs the GEM resource */
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
-
npages = obj->size >> PAGE_SHIFT;
pages = drm_malloc_ab(npages, sizeof(struct page *));
if (pages == NULL)
return ERR_PTR(-ENOMEM);
- gfpmask |= mapping_gfp_mask(mapping);
-
for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ p = alloc_page(gfpmask);
if (IS_ERR(p))
goto fail;
pages[i] = p;
@@ -192,31 +225,22 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
return pages;
fail:
- while (i--)
- page_cache_release(pages[i]);
+ while (--i)
+ __free_page(pages[i]);
drm_free_large(pages);
return ERR_PTR(PTR_ERR(p));
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages,
- bool dirty, bool accessed)
+ struct page **pages)
{
- int i, npages;
+ int npages;
npages = obj->size >> PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- if (dirty)
- set_page_dirty(pages[i]);
-
- if (accessed)
- mark_page_accessed(pages[i]);
-
- /* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
- }
+ while (--npages >= 0)
+ __free_page(pages[npages]);
drm_free_large(pages);
}
@@ -236,7 +260,7 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
pfn = page_to_pfn(buf->pages[page_offset++]);
} else
- pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+ pfn = (buf->paddr >> PAGE_SHIFT) + page_offset;
return vm_insert_mixed(vma, f_vaddr, pfn);
}
@@ -255,7 +279,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
return -EINVAL;
}
- pages = exynos_gem_get_pages(obj, GFP_KERNEL);
+ pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
if (IS_ERR(pages)) {
DRM_ERROR("failed to get pages.\n");
return PTR_ERR(pages);
@@ -288,15 +312,13 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
sgl = sg_next(sgl);
}
- /* add some codes for UNCACHED type here. TODO */
-
buf->pages = pages;
return ret;
err1:
kfree(buf->sgt);
buf->sgt = NULL;
err:
- exynos_gem_put_pages(obj, pages, true, false);
+ exynos_gem_put_pages(obj, pages);
return ret;
}
@@ -314,7 +336,7 @@ static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
kfree(buf->sgt);
buf->sgt = NULL;
- exynos_gem_put_pages(obj, buf->pages, true, false);
+ exynos_gem_put_pages(obj, buf->pages);
buf->pages = NULL;
/* add some codes for UNCACHED type here. TODO */
@@ -338,6 +360,9 @@ static void exynos_drm_put_userptr(struct drm_gem_object *obj)
npages = buf->size >> PAGE_SHIFT;
+ if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR && !buf->pfnmap)
+ lock_userptr_vma(buf, 0);
+
npages--;
while (npages >= 0) {
if (buf->write)
@@ -381,10 +406,12 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
struct drm_gem_object *obj;
struct exynos_drm_gem_buf *buf;
+ struct exynos_drm_private *private;
DRM_DEBUG_KMS("%s\n", __FILE__);
obj = &exynos_gem_obj->base;
+ private = obj->dev->dev_private;
buf = exynos_gem_obj->buffer;
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
@@ -401,6 +428,18 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (!buf->pages)
return;
+ /*
+ * do not release memory region from exporter.
+ *
+ * the region will be released by exporter
+ * once dmabuf's refcount becomes 0.
+ */
+ if (obj->import_attach)
+ goto out;
+
+ if (private->vmm)
+ exynos_drm_iommu_unmap_gem(obj);
+
if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
exynos_drm_gem_put_pages(obj);
else if (exynos_gem_obj->flags & EXYNOS_BO_USERPTR)
@@ -408,6 +447,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
else
exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
@@ -441,6 +481,27 @@ struct exynos_drm_gem_obj *exynos_drm_gem_get_obj(struct drm_device *dev,
return exynos_gem_obj;
}
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv)
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct drm_gem_object *obj;
+
+ obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ return 0;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return exynos_gem_obj->buffer->size;
+}
+
+
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -474,7 +535,9 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_gem_buf *buf;
+ unsigned long packed_size = size;
int ret;
if (!size) {
@@ -499,6 +562,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
goto err_fini_buf;
}
+ exynos_gem_obj->packed_size = packed_size;
exynos_gem_obj->buffer = buf;
/* set memory type and cache attribute from user side. */
@@ -522,6 +586,31 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
}
}
+ if (private->vmm) {
+ exynos_gem_obj->vmm = private->vmm;
+
+ buf->dev_addr = exynos_drm_iommu_map_gem(dev,
+ &exynos_gem_obj->base);
+ if (!buf->dev_addr) {
+ DRM_ERROR("failed to map gem with iommu table.\n");
+ ret = -EFAULT;
+
+ if (flags & EXYNOS_BO_NONCONTIG)
+ exynos_drm_gem_put_pages(&exynos_gem_obj->base);
+ else
+ exynos_drm_free_buf(dev, flags, buf);
+
+ drm_gem_object_release(&exynos_gem_obj->base);
+
+ goto err_fini_buf;
+ }
+
+ buf->dma_addr = buf->dev_addr;
+ } else
+ buf->dma_addr = buf->paddr;
+
+ DRM_DEBUG_KMS("dma_addr = 0x%x\n", buf->dma_addr);
+
return exynos_gem_obj;
err_fini_buf:
@@ -549,63 +638,49 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
return ret;
}
- return 0;
+ return ret;
}
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv)
+ struct drm_file *filp,
+ unsigned int *gem_obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+ obj = drm_gem_object_lookup(dev, filp, gem_handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
return ERR_PTR(-EINVAL);
}
exynos_gem_obj = to_exynos_gem_obj(obj);
+ buf = exynos_gem_obj->buffer;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
+ *gem_obj = (unsigned int)obj;
- /* TODO */
- return ERR_PTR(-EINVAL);
- }
-
- return &exynos_gem_obj->buffer->dma_addr;
+ return &buf->dma_addr;
}
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev, void *gem_obj)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
+ if (!gem_obj)
return;
- }
- exynos_gem_obj = to_exynos_gem_obj(obj);
+ /* use gem handle instead of object. TODO */
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- DRM_DEBUG_KMS("not support NONCONTIG type.\n");
- drm_gem_object_unreference_unlocked(obj);
+ obj = gem_obj;
- /* TODO */
- return;
- }
-
- drm_gem_object_unreference_unlocked(obj);
+ exynos_gem_obj = to_exynos_gem_obj(obj);
/*
- * decrease obj->refcount one more time because we has already
- * increased it at exynos_drm_gem_get_dma_addr().
+ * unreference this gem object because this had already been
+ * referenced at exynos_drm_gem_get_dma_addr().
*/
drm_gem_object_unreference_unlocked(obj);
}
@@ -681,7 +756,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
* get page frame number to physical memory to be mapped
* to user space.
*/
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
+ pfn = ((unsigned long)exynos_gem_obj->buffer->paddr) >>
PAGE_SHIFT;
DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
@@ -749,7 +824,9 @@ static int exynos_drm_get_userptr(struct drm_device *dev,
unsigned long npages = 0;
struct vm_area_struct *vma;
struct exynos_drm_gem_buf *buf = obj->buffer;
+ int ret;
+ down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, userptr);
/* the memory region mmaped with VM_PFNMAP. */
@@ -767,11 +844,11 @@ static int exynos_drm_get_userptr(struct drm_device *dev,
for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
ret = follow_pfn(vma, start, &this_pfn);
if (ret)
- return ret;
+ goto err;
if (prev_pfn == 0) {
pa = this_pfn << PAGE_SHIFT;
- buf->dma_addr = pa + offset;
+ buf->paddr = pa + offset;
} else if (this_pfn != prev_pfn + 1) {
ret = -EINVAL;
goto err;
@@ -791,14 +868,30 @@ static int exynos_drm_get_userptr(struct drm_device *dev,
goto err;
}
+ up_read(&current->mm->mmap_sem);
buf->pfnmap = true;
return npages;
err:
- buf->dma_addr = 0;
+ buf->paddr = 0;
+ up_read(&current->mm->mmap_sem);
+
return ret;
}
+ up_read(&current->mm->mmap_sem);
+
+ /*
+ * lock the vma within userptr to keep the userspace buffer
+ * from being swapped out.
+ */
+ ret = lock_userptr_vma(buf, 1);
+ if (ret < 0) {
+ DRM_ERROR("failed to lock vma for userptr.\n");
+ lock_userptr_vma(buf, 0);
+ return 0;
+ }
+
buf->write = write;
npages = buf->size >> PAGE_SHIFT;
@@ -809,6 +902,7 @@ err:
if (get_npages != npages)
DRM_ERROR("failed to get user_pages.\n");
+ buf->userptr = userptr;
buf->pfnmap = false;
return get_npages;
@@ -817,11 +911,12 @@ err:
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct exynos_drm_private *priv = dev->dev_private;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_userptr *args = data;
struct exynos_drm_gem_buf *buf;
struct scatterlist *sgl;
- unsigned long size, userptr;
+ unsigned long size, userptr, packed_size;
unsigned int npages;
int ret, get_npages;
@@ -836,7 +931,15 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
+ packed_size = args->size;
+
size = roundup_gem_size(args->size, EXYNOS_BO_USERPTR);
+
+ if (size > priv->userptr_limit) {
+ DRM_ERROR("excessed maximum size of userptr.\n");
+ return -EINVAL;
+ }
+
userptr = args->userptr;
buf = exynos_drm_init_buf(dev, size);
@@ -849,6 +952,8 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto err_free_buffer;
}
+ exynos_gem_obj->packed_size = packed_size;
+
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
DRM_ERROR("failed to allocate buf->sgt.\n");
@@ -909,8 +1014,30 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* always use EXYNOS_BO_USERPTR as memory type for userptr. */
exynos_gem_obj->flags |= EXYNOS_BO_USERPTR;
+ if (priv->vmm) {
+ exynos_gem_obj->vmm = priv->vmm;
+
+ buf->dev_addr = exynos_drm_iommu_map_gem(dev,
+ &exynos_gem_obj->base);
+ if (!buf->dev_addr) {
+ DRM_ERROR("failed to map gem with iommu table.\n");
+ ret = -EFAULT;
+
+ exynos_drm_free_buf(dev, exynos_gem_obj->flags, buf);
+
+ drm_gem_object_release(&exynos_gem_obj->base);
+
+ goto err_release_handle;
+ }
+
+ buf->dma_addr = buf->dev_addr;
+ } else
+ buf->dma_addr = buf->paddr;
+
return 0;
+err_release_handle:
+ drm_gem_handle_delete(file_priv, args->handle);
err_release_userptr:
get_npages--;
while (get_npages >= 0)
@@ -957,6 +1084,26 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
return 0;
}
+int exynos_drm_gem_user_limit_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct exynos_drm_private *priv = dev->dev_private;
+ struct drm_exynos_user_limit *limit = data;
+
+ if (limit->userptr_limit < PAGE_SIZE ||
+ limit->userptr_limit > USERPTR_MAX_SIZE) {
+ DRM_DEBUG_KMS("invalid userptr_limit size.\n");
+ return -EINVAL;
+ }
+
+ if (priv->userptr_limit == limit->userptr_limit)
+ return 0;
+
+ priv->userptr_limit = limit->userptr_limit;
+
+ return 0;
+}
+
int exynos_drm_gem_export_ump_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -1034,13 +1181,16 @@ static int exynos_gem_l1_cache_ops(struct drm_device *drm_dev,
}
static int exynos_gem_l2_cache_ops(struct drm_device *drm_dev,
- struct drm_exynos_gem_cache_op *op) {
- phys_addr_t phy_start, phy_end;
-
+ struct drm_file *filp,
+ struct drm_exynos_gem_cache_op *op)
+{
if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE ||
op->flags & EXYNOS_DRM_CACHE_INV_RANGE ||
op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
+ unsigned long virt_start = op->usr_addr, pfn;
+ phys_addr_t phy_start, phy_end;
struct vm_area_struct *vma;
+ int ret;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, op->usr_addr);
@@ -1052,44 +1202,90 @@ static int exynos_gem_l2_cache_ops(struct drm_device *drm_dev,
}
/*
- * for range flush to l2 cache, mmaped memory region should
- * be physically continuous because l2 cache uses PIPT.
+ * Range operation to l2 cache(PIPT)
*/
if (vma && (vma->vm_flags & VM_PFNMAP)) {
- unsigned long virt_start = op->usr_addr, pfn;
- int ret;
-
ret = follow_pfn(vma, virt_start, &pfn);
if (ret < 0) {
- DRM_ERROR("failed to get pfn from usr_addr.\n");
+ DRM_ERROR("failed to get pfn.\n");
return ret;
}
+ /*
+ * the memory region with VM_PFNMAP is contiguous
+ * physically so do range operation just one time.
+ */
phy_start = pfn << PAGE_SHIFT;
phy_end = phy_start + op->size;
+
+ if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
+ outer_flush_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
+ outer_inv_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
+ outer_clean_range(phy_start, phy_end);
+
+ return 0;
} else {
- DRM_ERROR("not mmaped memory region with PFNMAP.\n");
- return -EINVAL;
+ struct exynos_drm_gem_obj *exynos_obj;
+ struct exynos_drm_gem_buf *buf;
+ struct drm_gem_object *obj;
+ struct scatterlist *sgl;
+ unsigned int npages, i = 0;
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(drm_dev, filp,
+ op->gem_handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ mutex_unlock(&drm_dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ exynos_obj = to_exynos_gem_obj(obj);
+ buf = exynos_obj->buffer;
+ npages = buf->size >> PAGE_SHIFT;
+ sgl = buf->sgt->sgl;
+
+ drm_gem_object_unreference(obj);
+ mutex_unlock(&drm_dev->struct_mutex);
+
+ /*
+ * in this case, the memory region is non-contiguous
+ * physically so do range operation to all the pages.
+ */
+ while (i < npages) {
+ phy_start = sg_dma_address(sgl);
+ phy_end = phy_start + buf->page_size;
+
+ if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
+ outer_flush_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
+ outer_inv_range(phy_start, phy_end);
+ else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
+ outer_clean_range(phy_start, phy_end);
+
+ i++;
+ sgl = sg_next(sgl);
+ }
+
+ return 0;
}
}
if (op->flags & EXYNOS_DRM_CACHE_FSH_ALL)
outer_flush_all();
- else if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE)
- outer_flush_range(phy_start, phy_end);
else if (op->flags & EXYNOS_DRM_CACHE_INV_ALL)
outer_inv_all();
else if (op->flags & EXYNOS_DRM_CACHE_CLN_ALL)
outer_clean_all();
- else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE)
- outer_inv_range(phy_start, phy_end);
- else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE)
- outer_clean_range(phy_start, phy_end);
else {
DRM_ERROR("invalid l2 cache operation.\n");
return -EINVAL;
}
+
return 0;
}
@@ -1105,6 +1301,33 @@ int exynos_drm_gem_cache_op_ioctl(struct drm_device *drm_dev, void *data,
if (ret)
return -EINVAL;
+ /*
+ * do cache operation for all cache range if op->size is bigger
+ * than SZ_1M because cache range operation with big size has
+ * big cost.
+ */
+ if (op->size >= SZ_1M) {
+ if (op->flags & EXYNOS_DRM_CACHE_FSH_RANGE) {
+ if (op->flags & EXYNOS_DRM_L1_CACHE)
+ __cpuc_flush_user_all();
+
+ if (op->flags & EXYNOS_DRM_L2_CACHE)
+ outer_flush_all();
+
+ return 0;
+ } else if (op->flags & EXYNOS_DRM_CACHE_INV_RANGE) {
+ if (op->flags & EXYNOS_DRM_L2_CACHE)
+ outer_inv_all();
+
+ return 0;
+ } else if (op->flags & EXYNOS_DRM_CACHE_CLN_RANGE) {
+ if (op->flags & EXYNOS_DRM_L2_CACHE)
+ outer_clean_all();
+
+ return 0;
+ }
+ }
+
if (op->flags & EXYNOS_DRM_L1_CACHE ||
op->flags & EXYNOS_DRM_ALL_CACHES) {
ret = exynos_gem_l1_cache_ops(drm_dev, op);
@@ -1114,7 +1337,7 @@ int exynos_drm_gem_cache_op_ioctl(struct drm_device *drm_dev, void *data,
if (op->flags & EXYNOS_DRM_L2_CACHE ||
op->flags & EXYNOS_DRM_ALL_CACHES)
- ret = exynos_gem_l2_cache_ops(drm_dev, op);
+ ret = exynos_gem_l2_cache_ops(drm_dev, file_priv, op);
err:
return ret;
}
@@ -1150,7 +1373,7 @@ int exynos_drm_gem_get_phy_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- get_phy->phy_addr = exynos_gem_obj->buffer->dma_addr;
+ get_phy->phy_addr = exynos_gem_obj->buffer->paddr;
get_phy->size = exynos_gem_obj->buffer->size;
drm_gem_object_unreference(obj);
@@ -1164,24 +1387,34 @@ int exynos_drm_gem_phy_imp_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_gem_phy_imp *args = data;
struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_private *private = drm_dev->dev_private;
struct exynos_drm_gem_buf *buffer;
+ unsigned long size, packed_size;
+ unsigned int flags = EXYNOS_BO_CONTIG;
+ unsigned int npages, i = 0;
+ struct scatterlist *sgl;
+ dma_addr_t start_addr;
int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_init(drm_dev, args->size);
+ packed_size = args->size;
+ size = roundup_gem_size(args->size, flags);
+
+ exynos_gem_obj = exynos_drm_gem_init(drm_dev, size);
if (!exynos_gem_obj)
return -ENOMEM;
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ buffer = exynos_drm_init_buf(drm_dev, size);
if (!buffer) {
DRM_DEBUG_KMS("failed to allocate buffer\n");
ret = -ENOMEM;
- goto err;
+ goto err_release_gem_obj;
}
- buffer->dma_addr = (dma_addr_t)args->phy_addr;
- buffer->size = args->size;
+ exynos_gem_obj->packed_size = packed_size;
+ buffer->paddr = (dma_addr_t)args->phy_addr;
+ buffer->size = size;
/*
* if shared is true, this buffer wouldn't be released.
@@ -1194,15 +1427,90 @@ int exynos_drm_gem_phy_imp_ioctl(struct drm_device *drm_dev, void *data,
ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
&args->gem_handle);
if (ret)
- goto err_kfree_buffer;
+ goto err_fini_buf;
DRM_DEBUG_KMS("got gem handle = 0x%x\n", args->gem_handle);
+ if (buffer->size >= SZ_1M) {
+ npages = buffer->size >> SECTION_SHIFT;
+ buffer->page_size = SECTION_SIZE;
+ } else if (buffer->size >= SZ_64K) {
+ npages = buffer->size >> 16;
+ buffer->page_size = SZ_64K;
+ } else {
+ npages = buffer->size >> PAGE_SHIFT;
+ buffer->page_size = PAGE_SIZE;
+ }
+
+ buffer->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!buffer->sgt) {
+ DRM_ERROR("failed to allocate sg table.\n");
+ ret = -ENOMEM;
+ goto err_release_handle;
+ }
+
+ ret = sg_alloc_table(buffer->sgt, npages, GFP_KERNEL);
+ if (ret < 0) {
+ DRM_ERROR("failed to initialize sg table.\n");
+ goto err_free_sgt;
+ }
+
+ buffer->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
+ if (!buffer->pages) {
+ DRM_ERROR("failed to allocate pages.\n");
+ ret = -ENOMEM;
+ goto err_sg_free_table;
+ }
+
+ sgl = buffer->sgt->sgl;
+ start_addr = buffer->paddr;
+
+ while (i < npages) {
+ buffer->pages[i] = phys_to_page(start_addr);
+ sg_set_page(sgl, buffer->pages[i], buffer->page_size, 0);
+ sg_dma_address(sgl) = start_addr;
+ start_addr += buffer->page_size;
+ sgl = sg_next(sgl);
+ i++;
+ }
+
+ if (private->vmm) {
+ exynos_gem_obj->vmm = private->vmm;
+
+ buffer->dev_addr = exynos_drm_iommu_map_gem(drm_dev,
+ &exynos_gem_obj->base);
+ if (!buffer->dev_addr) {
+ DRM_ERROR("failed to map gem with iommu table.\n");
+ ret = -EFAULT;
+
+ exynos_drm_free_buf(drm_dev, flags, buffer);
+
+ drm_gem_object_release(&exynos_gem_obj->base);
+
+ goto err_free_pages;
+ }
+
+ buffer->dma_addr = buffer->dev_addr;
+ } else
+ buffer->dma_addr = buffer->paddr;
+
+ DRM_DEBUG_KMS("dma_addr = 0x%x\n", buffer->dma_addr);
+
return 0;
-err_kfree_buffer:
- kfree(buffer);
-err:
+err_free_pages:
+ kfree(buffer->pages);
+ buffer->pages = NULL;
+err_sg_free_table:
+ sg_free_table(buffer->sgt);
+err_free_sgt:
+ kfree(buffer->sgt);
+ buffer->sgt = NULL;
+err_release_handle:
+ drm_gem_handle_delete(file_priv, args->gem_handle);
+err_fini_buf:
+ exynos_drm_fini_buf(drm_dev, buffer);
+err_release_gem_obj:
drm_gem_object_release(&exynos_gem_obj->base);
kfree(exynos_gem_obj);
return ret;
@@ -1267,7 +1575,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret = 0;
@@ -1288,15 +1595,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- if (!exynos_gem_obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (!obj->map_list.map) {
+ ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
@@ -1331,53 +1636,9 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
void exynos_drm_gem_close_object(struct drm_gem_object *obj,
struct drm_file *file)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct iommu_gem_map_params params;
- unsigned int type = 0;
-
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* check this gem object was mapped to iommu at here. TODO */
-
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- while (type < MAX_IOMMU_NR) {
- /*
- * unmap device address space already mapped to iommu.
- * - this codes would be performed with user gem release
- * request but in case of no request, when device driver
- * using iommu is released, also same things should be
- * performed by each driver.
- */
- if (exynos_gem_obj->iommu_info.mapped & (1 << type)) {
- dma_addr_t dma_addr;
- struct list_head *list;
-
- params.dev = exynos_gem_obj->iommu_info.devs[type];
- params.drm_dev = obj->dev;
- params.file = file;
- params.gem_obj = exynos_gem_obj;
- dma_addr = exynos_gem_obj->iommu_info.dma_addrs[type];
-
- exynos_drm_iommu_unmap_gem(&params,
- dma_addr,
- type);
-
- exynos_gem_obj->iommu_info.mapped &= ~(1 << type);
- exynos_gem_obj->iommu_info.dma_addrs[type] = 0;
-
- list = exynos_gem_obj->iommu_info.iommu_lists[type];
-
- /*
- * this gem has been unmapped from iommu so also
- * remove a iommu node from current device's own
- * iommu list.
- */
- exynos_drm_remove_iommu_list(list, exynos_gem_obj);
- }
-
- type++;
- }
+ /* TODO */
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 1227ae3..c6cd6e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -61,9 +61,10 @@ struct exynos_drm_iommu_info {
* exynos drm gem buffer structure.
*
* @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
* @dma_addr: bus address(accessed by dma) to allocated memory region.
- * - this address could be physical address without IOMMU and
- * device address with IOMMU.
+ * @dev_addr: device address for IOMMU.
+ * @paddr: physical address to allocated buffer.
* @write: whether pages will be written to by the caller.
* @sgt: sg table to transfer page data.
* @pages: contain all pages to allocated memory region.
@@ -71,21 +72,22 @@ struct exynos_drm_iommu_info {
* @size: size of allocated memory region.
* @shared: indicate shared mfc memory region.
* (temporarily used and it should be removed later.)
- * @shared_refcount: a reference count for this buffer being shared with others.
* @pfnmap: indicate whether memory region from userptr is mmaped with
* VM_PFNMAP or not.
*/
struct exynos_drm_gem_buf {
struct device *dev;
void __iomem *kvaddr;
+ unsigned long userptr;
dma_addr_t dma_addr;
+ dma_addr_t dev_addr;
+ dma_addr_t paddr;
unsigned int write;
struct sg_table *sgt;
struct page **pages;
unsigned long page_size;
unsigned long size;
bool shared;
- atomic_t shared_refcount;
bool pfnmap;
};
@@ -102,8 +104,12 @@ struct exynos_drm_gem_buf {
* or at framebuffer creation.
* @iommu_info: contain iommu mapping information to each device driver
* using its own iommu.
- * @size: total memory size to physically non-continuous memory region.
+ * @size: size requested from user, in bytes and this size is aligned
+ * in page unit.
+ * @packed_size: real size of the gem object, in bytes and
+ * this size isn't aligned in page unit.
* @flags: indicate memory type to allocated buffer and cache attribute.
+ * @vmm: vmm object for iommu framework.
* @priv_handle: handle to specific buffer object.
* @priv_id: unique id to specific buffer object.
*
@@ -115,10 +121,10 @@ struct exynos_drm_gem_obj {
struct exynos_drm_gem_buf *buffer;
struct exynos_drm_iommu_info iommu_info;
unsigned long size;
+ unsigned long packed_size;
struct vm_area_struct *vma;
unsigned int flags;
- void *dma_buf_vmapping;
- int vmapping_count;
+ void *vmm;
unsigned int priv_handle;
unsigned int priv_id;
};
@@ -132,6 +138,9 @@ int register_buf_to_priv_mgr(struct exynos_drm_gem_obj *obj,
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+int exynos_drm_gem_user_limit_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
@@ -159,16 +168,15 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
*/
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
- struct drm_file *file_priv);
+ struct drm_file *filp,
+ unsigned int *gem_obj);
/*
* put dma address from gem handle and this function could be used for
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be decreased.
*/
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv);
+void exynos_drm_gem_put_dma_addr(struct drm_device *dev, void *gem_obj);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -189,6 +197,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* get buffer size to gem handle. */
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+ unsigned int gem_handle,
+ struct drm_file *file_priv);
+
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 0000000..d5e3a68
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1385 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include "drmP.h"
+#include "drm_backlight.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/cma.h>
+#include <plat/map-base.h>
+
+#include "regs-gsc.h"
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_ipp.h"
+#include "exynos_drm_gsc.h"
+
+/*
+ * GSC is stand for General SCaler and
+ * supports image scaler/rotator and input/output DMA operations.
+ * input DMA reads image data from the memory.
+ * output DMA writes image data to memory.
+ * GSC supports image rotation and image effect functions.
+ */
+
+#define GSC_MAX_DEVS 4
+#define GSC_MAX_SRC 8
+#define GSC_MAX_DST 32
+#define GSC_RESET_TIMEOUT 50
+#ifdef CONFIG_SLP_DISP_DEBUG
+#define GSC_MAX_REG 128
+#define GSC_BASE_REG(id) (0x13E00000 + (0x10000 * id))
+#endif
+#define GSC_CLK_RATE 166750000
+#define GSC_BUF_STOP 1
+#define GSC_BUF_START 2
+#define GSC_REG_SZ 32
+#define GSC_WIDTH_ITU_709 1280
+
+#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
+ struct gsc_context, ippdrv);
+#define gsc_read(offset) readl(ctx->regs + (offset));
+#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset));
+
+enum gsc_wb {
+ GSC_WB_NONE,
+ GSC_WB_A,
+ GSC_WB_B,
+};
+
+/*
+ * A structure of scaler.
+ *
+ * @range: narrow, wide.
+ * @pre_shfactor: pre scaler shift factor.
+ * @pre_hratio: horizontal ratio of the prescaler.
+ * @pre_vratio: vertical ratio of the prescaler.
+ * @main_hratio: the main scaler's horizontal ratio.
+ * @main_vratio: the main scaler's vertical ratio.
+ */
+struct gsc_scaler {
+ bool range;
+ u32 pre_shfactor;
+ u32 pre_hratio;
+ u32 pre_vratio;
+ unsigned long main_hratio;
+ unsigned long main_vratio;
+};
+
+/*
+ * A structure of scaler capability.
+ *
+ * find user manual 49.2 features.
+ * @tile_w: tile mode or rotation width.
+ * @tile_h: tile mode or rotation height.
+ * @w: other cases width.
+ * @h: other cases height.
+ */
+struct gsc_capability {
+ /* tile or rotation */
+ u32 tile_w;
+ u32 tile_h;
+ /* other cases */
+ u32 w;
+ u32 h;
+};
+
+/*
+ * A structure of gsc context.
+ *
+ * @ippdrv: prepare initialization using ippdrv.
+ * @regs_res: register resources.
+ * @regs: memory mapped io registers.
+ * @lock: locking of operations.
+ * @gsc_clk: gsc clock.
+ * @sc: scaler information.
+ * @capa: scaler capability.
+ * @id: gsc id.
+ * @irq: irq number.
+ * @suspended: qos operations.
+ */
+struct gsc_context {
+ struct exynos_drm_ippdrv ippdrv;
+ struct resource *regs_res;
+ void __iomem *regs;
+ struct mutex lock;
+ struct clk *gsc_clk;
+ struct gsc_scaler sc;
+ struct gsc_capability *capa;
+ int id;
+ int irq;
+ bool suspended;
+};
+
+struct gsc_capability gsc51_capa[GSC_MAX_DEVS] = {
+ {
+ .tile_w = 2048,
+ .tile_h = 2048,
+ .w = 4800,
+ .h = 3344,
+ }, {
+ .tile_w = 2048,
+ .tile_h = 2048,
+ .w = 4800,
+ .h = 3344,
+ }, {
+ .tile_w = 2048,
+ .tile_h = 2048,
+ .w = 4800,
+ .h = 3344,
+ }, {
+ .tile_w = 2048,
+ .tile_h = 2048,
+ .w = 4800,
+ .h = 3344,
+ },
+};
+
+static int gsc_sw_reset(struct gsc_context *ctx)
+{
+ u32 cfg;
+ int count = GSC_RESET_TIMEOUT;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* s/w reset */
+ cfg = (GSC_SW_RESET_SRESET);
+ gsc_write(cfg, GSC_SW_RESET);
+
+ /* wait s/w reset complete */
+ while (count--) {
+ cfg = gsc_read(GSC_SW_RESET);
+ if (!cfg)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ if (cfg) {
+ DRM_ERROR("failed to reset gsc h/w.\n");
+ return -EBUSY;
+ }
+
+ /* display fifo reset */
+ cfg = readl(SYSREG_GSCBLK_CFG0);
+ /*
+ * GSCBLK Pixel async FIFO S/W reset sequence
+ * set PXLASYNC_SW_RESET as 0 then,
+ * set PXLASYNC_SW_RESET as 1 again
+ */
+ cfg &= ~GSC_PXLASYNC_RST(ctx->id);
+ writel(cfg, SYSREG_GSCBLK_CFG0);
+ cfg |= GSC_PXLASYNC_RST(ctx->id);
+ writel(cfg, SYSREG_GSCBLK_CFG0);
+
+ /* pixel async reset */
+ cfg = readl(SYSREG_DISP1BLK_CFG);
+ /*
+ * DISPBLK1 FIFO S/W reset sequence
+ * set FIFORST_DISP1 as 0 then,
+ * set FIFORST_DISP1 as 1 again
+ */
+ cfg &= ~FIFORST_DISP1;
+ writel(cfg, SYSREG_DISP1BLK_CFG);
+ cfg |= FIFORST_DISP1;
+ writel(cfg, SYSREG_DISP1BLK_CFG);
+
+ /* reset sequence */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_IN_BASE_ADDR_MASK |
+ GSC_IN_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+ cfg |= (GSC_OUT_BASE_ADDR_MASK |
+ GSC_OUT_BASE_ADDR_PINGPONG(0));
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
+{
+ u32 gscblk_cfg;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
+
+ if (enable)
+ gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
+ GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
+ GSC_BLK_SW_RESET_WB_DEST(ctx->id);
+ else
+ gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
+
+ writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
+}
+
+static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
+ bool overflow, bool done)
+{
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
+ enable, overflow, done);
+
+ cfg = gsc_read(GSC_IRQ);
+ cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
+
+ if (enable) {
+ cfg |= GSC_IRQ_ENABLE;
+ if (overflow)
+ cfg &= ~GSC_IRQ_OR_MASK;
+ if (done)
+ cfg &= ~GSC_IRQ_FRMDONE_MASK;
+ } else
+ cfg &= ~GSC_IRQ_ENABLE;
+
+ gsc_write(cfg, GSC_IRQ);
+}
+
+static int gsc_set_planar_addr(struct drm_exynos_ipp_buf_info *buf_info,
+ u32 fmt, struct drm_exynos_sz *sz)
+{
+ dma_addr_t *y_addr = &buf_info->base[EXYNOS_DRM_PLANAR_Y];
+ dma_addr_t *cb_addr = &buf_info->base[EXYNOS_DRM_PLANAR_CB];
+ dma_addr_t *cr_addr = &buf_info->base[EXYNOS_DRM_PLANAR_CR];
+ uint64_t y_ofs, cb_ofs, cr_ofs;
+
+ /*
+ * ToDo: check the buffer size between gem allocated buffers
+ * and each planar size.
+ */
+ switch (fmt) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ y_ofs = sz->hsize * sz->vsize;
+ cb_ofs = y_ofs >> 1;
+ cr_ofs = 0;
+ break;
+ case DRM_FORMAT_NV12M:
+ y_ofs = ALIGN(ALIGN(sz->hsize, 16) *
+ ALIGN(sz->vsize, 16), SZ_2K);
+ cb_ofs = ALIGN(ALIGN(sz->hsize, 16) *
+ ALIGN(sz->vsize >> 1, 16), SZ_2K);
+ cr_ofs = 0;
+ break;
+ case DRM_FORMAT_NV12MT:
+ y_ofs = ALIGN(ALIGN(sz->hsize, 128) *
+ ALIGN(sz->vsize, 32), SZ_8K);
+ cb_ofs = ALIGN(ALIGN(sz->hsize, 128) *
+ ALIGN(sz->vsize >> 1, 32), SZ_8K);
+ cr_ofs = 0;
+ break;
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ case DRM_FORMAT_YUV420M:
+ y_ofs = sz->hsize * sz->vsize;
+ cb_ofs = cr_ofs = y_ofs >> 2;
+ break;
+ default:
+ y_ofs = cb_ofs = cr_ofs = 0;
+ break;
+ }
+
+ if (y_ofs && *y_addr) {
+ *cb_addr = *y_addr + y_ofs;
+
+ if (cb_ofs && *cb_addr)
+ *cr_addr = *cb_addr + cb_ofs;
+ }
+
+ return 0;
+}
+
+static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
+ GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
+ GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE);
+
+ switch (fmt) {
+ case DRM_FORMAT_RGB565:
+ cfg |= GSC_IN_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ cfg |= GSC_IN_XRGB8888;
+ break;
+ case DRM_FORMAT_YUYV:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_YVYU:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_ORDER_LSB_Y |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_UYVY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CBCR);
+ break;
+ case DRM_FORMAT_VYUY:
+ cfg |= (GSC_IN_YUV422_1P |
+ GSC_IN_YUV422_1P_OEDER_LSB_C |
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ cfg |= GSC_IN_YUV420_3P;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV16:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+ GSC_IN_YUV420_2P);
+ break;
+ case DRM_FORMAT_NV12MT:
+ cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return 0;
+}
+
+static int gsc_src_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ u32 cfg;
+
+ DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+ degree, flip);
+
+ cfg = gsc_read(GSC_IN_CON);
+ cfg &= ~GSC_IN_ROT_MASK;
+
+ switch (degree) {
+ case EXYNOS_DRM_DEGREE_0:
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_YFLIP;
+ break;
+ case EXYNOS_DRM_DEGREE_90:
+ if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+ cfg |= GSC_IN_ROT_90_XFLIP;
+ else if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+ cfg |= GSC_IN_ROT_90_YFLIP;
+ else
+ cfg |= GSC_IN_ROT_90;
+ break;
+ case EXYNOS_DRM_DEGREE_180:
+ cfg |= GSC_IN_ROT_180;
+ break;
+ case EXYNOS_DRM_DEGREE_270:
+ cfg |= GSC_IN_ROT_270;
+ break;
+ default:
+ dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+ return -EINVAL;
+ }
+
+ gsc_write(cfg, GSC_IN_CON);
+
+ return cfg ? 1 : 0;
+}
+
+static int gsc_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+ struct gsc_context *ctx = get_gsc_context(dev);
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ struct drm_exynos_ipp_property *property = ippdrv->property;
+ struct drm_exynos_ipp_config *config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_pos img_pos = *pos;
+ struct drm_exynos_sz img_sz = *sz;
+ u32 cfg;
+
+ /* ToDo: check width and height */
+ if (swap) {
+ img_pos.w = pos->h;
+ img_pos.h = pos->w;
+ img_sz.hsize = sz->vsize;
+ img_sz.vsize = sz->hsize;
+ }
+
+ DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
+ __func__, pos->x, pos->y, pos->w, pos->h);
+
+ /* pixel offset */
+ cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
+ GSC_SRCIMG_OFFSET_Y(img_pos.y));
+ gsc_write(cfg, GSC_SRCIMG_OFFSET);
+
+ /* cropped size */
+ cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
+ GSC_CROPPED_HEIGHT(img_pos.h));
+ gsc_write(cfg, GSC_CROPPED_SIZE);
+
+ DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
+ __func__, swap, sz->hsize, sz->vsize);
+
+ /* original size */
+ cfg = gsc_read(GSC_SRCIMG_SIZE);
+ cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
+ GSC_SRCIMG_WIDTH_MASK);
+
+ cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
+ GSC_SRCIMG_HEIGHT(sz->vsize));
+
+ gsc_write(cfg, GSC_SRCIMG_SIZE);
+
+ config->sz = img_sz;
+ config->pos = img_pos;
+
+ return 0;
+}
+
+static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+ enum drm_exynos_ipp_buf_ctrl buf_ctrl)
+{
+ struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+ bool masked;
+ u32 cfg;
+ u32 mask = 0x00000001 << buf_id;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]buf_ctrl[%d]\n", __func__,
+ buf_id, buf_ctrl);
+
+ /* mask register set */
+ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+
+ switch (buf_ctrl) {
+ case IPP_BUF_CTRL_QUEUE:
+ masked = false;
+ break;
+ case IPP_BUF_CTRL_DEQUEUE:
+ masked = true;
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+ return -EINVAL;
+ }
+
+ /* sequence id */
+ cfg &= (~mask);
+ cfg |= masked << buf_id;
+ gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
+ gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
+
+ return 0;
+}
+
+/*
+ * Program the per-plane source base addresses for slot @buf_id and then
+ * update the input buffer sequence via gsc_src_set_buf_seq().
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range buffer id or the
+ * error from gsc_set_planar_addr() when deriving the plane addresses
+ * fails.
+ */
+static int gsc_src_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_ctrl buf_ctrl)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *config =
+		&property->config[EXYNOS_DRM_OPS_SRC];
+	int ret;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_ctrl[%d]\n", __func__,
+		buf_id, buf_ctrl);
+
+	if (buf_id > GSC_MAX_SRC) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		/* parameter validation failure, not an allocation failure */
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+	case IPP_BUF_CTRL_DEQUEUE:
+		ret = gsc_set_planar_addr(buf_info, config->fmt, &config->sz);
+		if (ret) {
+			dev_err(dev, "failed to set plane addr.\n");
+			return ret;
+		}
+
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			GSC_IN_BASE_ADDR_Y(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+			GSC_IN_BASE_ADDR_CB(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+			GSC_IN_BASE_ADDR_CR(buf_id));
+		break;
+	default:
+		/* other controls do not touch the address registers */
+		break;
+	}
+
+	return gsc_src_set_buf_seq(ctx, buf_id, buf_ctrl);
+}
+
+/* source (input DMA) operations exposed to the IPP core */
+static struct exynos_drm_ipp_ops gsc_src_ops = {
+	.set_fmt = gsc_src_set_fmt,
+	.set_transf = gsc_src_set_transf,
+	.set_size = gsc_src_set_size,
+	.set_addr = gsc_src_set_addr,
+};
+
+/*
+ * Program the output (destination) pixel format in GSC_OUT_CON.
+ *
+ * Maps a DRM fourcc onto the matching output format, chroma order and
+ * tile mode bits.  Returns 0 on success or -EINVAL for an unsupported
+ * fourcc.
+ */
+static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
+
+	/* clear every field this function may set before or-ing new bits */
+	cfg = gsc_read(GSC_OUT_CON);
+	cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
+		GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
+		GSC_OUT_TILE_TYPE_MASK | GSC_OUT_TILE_MODE);
+
+	switch (fmt) {
+	case DRM_FORMAT_RGB565:
+		cfg |= GSC_OUT_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+		cfg |= GSC_OUT_XRGB8888;
+		break;
+	case DRM_FORMAT_YUYV:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+			GSC_OUT_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_YVYU:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_ORDER_LSB_Y |
+			GSC_OUT_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_UYVY:
+		/* "OEDER" below follows the macro name as declared (sic) */
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_OEDER_LSB_C |
+			GSC_OUT_CHROMA_ORDER_CBCR);
+		break;
+	case DRM_FORMAT_VYUY:
+		cfg |= (GSC_OUT_YUV422_1P |
+			GSC_OUT_YUV422_1P_OEDER_LSB_C |
+			GSC_OUT_CHROMA_ORDER_CRCB);
+		break;
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV61:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CRCB |
+			GSC_OUT_YUV420_2P);
+		break;
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		cfg |= GSC_OUT_YUV420_3P;
+		break;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV12M:
+	case DRM_FORMAT_NV16:
+		cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+			GSC_OUT_YUV420_2P);
+		break;
+	case DRM_FORMAT_NV12MT:
+		/* Samsung 16x8 tiled variant of NV12 */
+		cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_OUT_CON);
+
+	return 0;
+}
+
+/*
+ * Program rotation/flip for the operation.
+ *
+ * NOTE(review): although this is the dst ops hook, it writes GSC_IN_CON
+ * — rotation appears to be implemented on the input stage of the
+ * engine; confirm against the src set_transf hook.
+ */
+static int gsc_dst_set_transf(struct device *dev,
+		enum drm_exynos_degree degree,
+		enum drm_exynos_flip flip)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
+		degree, flip);
+
+	cfg = gsc_read(GSC_IN_CON);
+	cfg &= ~GSC_IN_ROT_MASK;
+
+	switch (degree) {
+	case EXYNOS_DRM_DEGREE_0:
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_XFLIP;
+		if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_YFLIP;
+		break;
+	case EXYNOS_DRM_DEGREE_90:
+		if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
+			cfg |= GSC_IN_ROT_90_XFLIP;
+		else if (flip & EXYNOS_DRM_FLIP_VERTICAL)
+			cfg |= GSC_IN_ROT_90_YFLIP;
+		else
+			cfg |= GSC_IN_ROT_90;
+		break;
+	case EXYNOS_DRM_DEGREE_180:
+		cfg |= GSC_IN_ROT_180;
+		break;
+	case EXYNOS_DRM_DEGREE_270:
+		cfg |= GSC_IN_ROT_270;
+		break;
+	default:
+		dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
+		return -EINVAL;
+	}
+
+	gsc_write(cfg, GSC_IN_CON);
+
+	/*
+	 * NOTE(review): presumably meant as a "width/height swapped"
+	 * indicator for 90/270 rotation, but non-rotation GSC_IN_CON bits
+	 * survive the ~GSC_IN_ROT_MASK above, so this can return 1 even
+	 * for 0/180 degrees — confirm against the caller's expectation.
+	 */
+	return cfg ? 1 : 0;
+}
+
+/*
+ * Derive the largest power-of-two pre-scale ratio (and its shift) that
+ * does not over-shrink src into dst.  Fails when src is 64x dst or
+ * more, which the pre-scaler cannot express.
+ */
+static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
+{
+	u32 s;
+
+	DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
+
+	if (src >= dst * 64) {
+		DRM_ERROR("failed to make ratio and shift.\n");
+		return -EINVAL;
+	}
+
+	/* pick the biggest shift in [5..0] with src >= dst * 2^shift */
+	for (s = 5; s > 0; s--)
+		if (src >= dst * (1 << s))
+			break;
+
+	*ratio = 1 << s;
+	*shift = s;
+
+	return 0;
+}
+
+/*
+ * Compute and program the pre-scaler for the src -> dst scale-down.
+ *
+ * The pre-scaler only handles power-of-two ratios (pre_hratio /
+ * pre_vratio); the residual fraction is left for the main scaler as a
+ * 16.16 fixed-point ratio (main_hratio / main_vratio).  The computed
+ * values are cached in @sc for gsc_set_scaler().
+ */
+static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
+		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+	u32 hfactor, vfactor;
+	int ret = 0;
+
+	ret = gsc_get_ratio_shift(src->w, dst->w, &sc->pre_hratio, &hfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
+		return ret;
+	}
+
+	ret = gsc_get_ratio_shift(src->h, dst->h, &sc->pre_vratio, &vfactor);
+	if (ret) {
+		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
+		return ret;
+	}
+
+	DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
+		__func__, sc->pre_hratio, hfactor, sc->pre_vratio, vfactor);
+
+	/* remaining ratio after the pre-scaler, in 16.16 fixed point */
+	sc->main_hratio = (src->w << 16) / (dst->w << hfactor);
+	sc->main_vratio = (src->h << 16) / (dst->h << vfactor);
+	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+		__func__, sc->main_hratio, sc->main_vratio);
+
+	/* hfactor/vfactor are each at most 5, so this stays >= 0 */
+	sc->pre_shfactor = 10 - (hfactor + vfactor);
+	DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
+		sc->pre_shfactor);
+
+	cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
+		GSC_PRESC_H_RATIO(sc->pre_hratio) |
+		GSC_PRESC_V_RATIO(sc->pre_vratio));
+	gsc_write(cfg, GSC_PRE_SCALE_RATIO);
+
+	return ret;
+}
+
+/* Program the main scaler with the ratios precomputed by the pre-scaler. */
+static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
+{
+	DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
+		__func__, sc->main_hratio, sc->main_vratio);
+
+	gsc_write(GSC_MAIN_H_RATIO_VALUE(sc->main_hratio), GSC_MAIN_H_RATIO);
+	gsc_write(GSC_MAIN_V_RATIO_VALUE(sc->main_vratio), GSC_MAIN_V_RATIO);
+}
+
+/*
+ * Program destination geometry: pixel offset, scaled size, original
+ * image size and the RGB output color range.  @swap exchanges width
+ * and height (rotated output).
+ */
+static int gsc_dst_set_size(struct device *dev, int swap,
+		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct drm_exynos_pos img_pos = *pos;
+	struct drm_exynos_sz img_sz = *sz;
+	struct gsc_scaler *sc = &ctx->sc;
+	u32 cfg;
+
+	DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
+		__func__, swap, pos->x, pos->y, pos->w, pos->h);
+
+	if (swap) {
+		img_pos.w = pos->h;
+		img_pos.h = pos->w;
+		img_sz.hsize = sz->vsize;
+		img_sz.vsize = sz->hsize;
+	}
+
+	/* pixel offset */
+	gsc_write(GSC_DSTIMG_OFFSET_X(img_pos.x) |
+		GSC_DSTIMG_OFFSET_Y(img_pos.y), GSC_DSTIMG_OFFSET);
+
+	/* scaled size (unswapped, as given by the caller) */
+	gsc_write(GSC_SCALED_WIDTH(pos->w) | GSC_SCALED_HEIGHT(pos->h),
+		GSC_SCALED_SIZE);
+
+	DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
+		__func__, sz->hsize, sz->vsize);
+
+	/* original size */
+	cfg = gsc_read(GSC_DSTIMG_SIZE);
+	cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
+	cfg |= GSC_DSTIMG_WIDTH(img_sz.hsize) |
+		GSC_DSTIMG_HEIGHT(img_sz.vsize);
+	gsc_write(cfg, GSC_DSTIMG_SIZE);
+
+	/* RGB color range: HD coefficients above the ITU-709 width */
+	cfg = gsc_read(GSC_OUT_CON);
+	cfg &= ~GSC_OUT_RGB_TYPE_MASK;
+
+	if (pos->w >= GSC_WIDTH_ITU_709) {
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_HD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_HD_NARROW;
+	} else {
+		if (sc->range)
+			cfg |= GSC_OUT_RGB_SD_WIDE;
+		else
+			cfg |= GSC_OUT_RGB_SD_NARROW;
+	}
+
+	gsc_write(cfg, GSC_OUT_CON);
+
+	return 0;
+}
+
+/* Count the destination buffers currently queued (mask bit cleared). */
+static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
+{
+	u32 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+	u32 buf_num = GSC_REG_SZ;
+	u32 i;
+
+	/* every set mask bit removes one buffer from the sequence */
+	for (i = 0; i < GSC_REG_SZ; i++) {
+		if (cfg & (1 << i))
+			buf_num--;
+	}
+
+	DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
+
+	return buf_num;
+}
+
+/*
+ * Update the destination buffer mask for slot @buf_id.
+ *
+ * QUEUE clears the slot's mask bit, DEQUEUE sets it; the mask value is
+ * mirrored to the Y/CB/CR registers.  Once enough buffers are queued
+ * (>= GSC_BUF_START) the frame interrupt is enabled.
+ * Returns 0 on success or -EINVAL for an unknown @buf_ctrl.
+ */
+static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
+		enum drm_exynos_ipp_buf_ctrl buf_ctrl)
+{
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	bool masked;
+	u32 cfg;
+	u32 mask = 0x00000001 << buf_id;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_ctrl[%d]\n", __func__,
+		buf_id, buf_ctrl);
+
+	/* mask register set */
+	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
+
+	switch (buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+		masked = false;
+		break;
+	case IPP_BUF_CTRL_DEQUEUE:
+		masked = true;
+		break;
+	default:
+		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
+		return -EINVAL;
+	}
+
+	/* sequence id: touch only this buffer's bit, keep all planes in sync */
+	cfg &= (~mask);
+	cfg |= masked << buf_id;
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	/* interrupt enable */
+	if (buf_ctrl == IPP_BUF_CTRL_QUEUE &&
+	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
+		gsc_handle_irq(ctx, true, false, true);
+
+	return 0;
+}
+
+/*
+ * Program the per-plane destination base addresses for slot @buf_id and
+ * then update the output buffer sequence via gsc_dst_set_buf_seq().
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range buffer id or the
+ * error from gsc_set_planar_addr() when deriving the plane addresses
+ * fails.
+ */
+static int gsc_dst_set_addr(struct device *dev,
+		struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+		enum drm_exynos_ipp_buf_ctrl buf_ctrl)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *config =
+		&property->config[EXYNOS_DRM_OPS_DST];
+	int ret;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]buf_ctrl[%d]\n", __func__,
+		buf_id, buf_ctrl);
+
+	if (buf_id > GSC_MAX_DST) {
+		dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
+		/* parameter validation failure, not an allocation failure */
+		return -EINVAL;
+	}
+
+	/* address register set */
+	switch (buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+	case IPP_BUF_CTRL_DEQUEUE:
+		ret = gsc_set_planar_addr(buf_info, config->fmt, &config->sz);
+		if (ret) {
+			dev_err(dev, "failed to set plane addr.\n");
+			return ret;
+		}
+
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
+			GSC_OUT_BASE_ADDR_Y(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
+			GSC_OUT_BASE_ADDR_CB(buf_id));
+		gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
+			GSC_OUT_BASE_ADDR_CR(buf_id));
+		break;
+	default:
+		/* other controls do not touch the address registers */
+		break;
+	}
+
+	return gsc_dst_set_buf_seq(ctx, buf_id, buf_ctrl);
+}
+
+/* destination (output DMA) operations exposed to the IPP core */
+static struct exynos_drm_ipp_ops gsc_dst_ops = {
+	.set_fmt = gsc_dst_set_fmt,
+	.set_transf = gsc_dst_set_transf,
+	.set_size = gsc_dst_set_size,
+	.set_addr = gsc_dst_set_addr,
+};
+
+/* Gate or ungate the gsc clock and track the suspended state. */
+static int gsc_power_on(struct gsc_context *ctx, bool enable)
+{
+	DRM_DEBUG_KMS("%s:\n", __func__);
+
+	if (enable)
+		clk_enable(ctx->gsc_clk);
+	else
+		clk_disable(ctx->gsc_clk);
+
+	/* ToDo : wb_b_clk */
+	ctx->suspended = !enable;
+
+	return 0;
+}
+
+/*
+ * Threaded interrupt handler for the GSC engine.
+ *
+ * Overflow is reported and the interrupt is not claimed (IRQ_NONE).
+ * Otherwise the current input buffer index is read back, that buffer
+ * is dequeued and the completion is forwarded to the IPP core; the
+ * interrupt is throttled off once the queue drains to GSC_BUF_STOP.
+ */
+static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
+{
+	struct gsc_context *ctx = dev_id;
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg, status;
+	int buf_id = 0;
+
+	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
+
+	status = gsc_read(GSC_IRQ);
+	if (status & GSC_IRQ_STATUS_OR_IRQ) {
+		dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
+			ctx->id, status);
+		return IRQ_NONE;
+	}
+
+	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
+		/*
+		 * NOTE(review): this looks like the normal completion case
+		 * but is logged at dev_err level — confirm intent.
+		 */
+		dev_err(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
+			ctx->id, status);
+		/* ToDo: Frame control */
+	}
+
+	/* nobody is waiting for a completion event */
+	if (list_empty(&ippdrv->event_list)) {
+		DRM_DEBUG_KMS("%s:event list empty.\n", __func__);
+
+		return IRQ_HANDLED;
+	}
+
+	/* index of the buffer the engine just finished with */
+	cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
+	buf_id = GSC_IN_CURR_GET_INDEX(cfg);
+	if (buf_id < 0)
+		return IRQ_HANDLED;
+
+	DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+	if (gsc_dst_set_buf_seq(ctx, buf_id,
+		IPP_BUF_CTRL_DEQUEUE) < 0) {
+		DRM_ERROR("failed to dequeue.\n");
+
+		return IRQ_HANDLED;
+	}
+
+	ipp_send_event_handler(ippdrv, buf_id);
+
+	/* stop interrupting when the output queue has drained */
+	if (gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
+		gsc_handle_irq(ctx, false, false, true);
+
+	return IRQ_HANDLED;
+}
+
+/* Stub property validation hook — currently accepts everything. */
+static int gsc_ippdrv_check_property(struct device *dev,
+		struct drm_exynos_ipp_property *property)
+{
+	/* ToDo: check valid using property information */
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Reset the hardware block and drop the cached scaler configuration so
+ * the next start recomputes it from scratch.
+ */
+static int gsc_ippdrv_reset(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* reset h/w block */
+	ret = gsc_sw_reset(ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to reset hardware.\n");
+		return ret;
+	}
+
+	/* forget stale pre/main scaler ratios */
+	memset(&ctx->sc, 0x0, sizeof(ctx->sc));
+
+	return 0;
+}
+
+/* Stub readiness check — always reports ready. */
+static int gsc_check_prepare(struct gsc_context *ctx)
+{
+	/* ToDo: check prepare using read register */
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	return 0;
+}
+
+/*
+ * Start a GSC operation of type @cmd.
+ *
+ * Enables interrupts, routes the FIMD writeback path for IPP_CMD_WB,
+ * programs the pre/main scaler from the SRC -> DST geometry and
+ * finally sets the engine enable bit.  Returns 0 on success or a
+ * negative error code.
+ */
+static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	struct drm_exynos_ipp_property *property = ippdrv->property;
+	struct drm_exynos_ipp_config *src_config =
+		&property->config[EXYNOS_DRM_OPS_SRC];
+	struct drm_exynos_ipp_config *dst_config =
+		&property->config[EXYNOS_DRM_OPS_DST];
+	u32 cfg;
+	int ret;
+	int enable = 1;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	ret = gsc_check_prepare(ctx);
+	if (ret) {
+		dev_err(dev, "failed to check prepare.\n");
+		return ret;
+	}
+
+	ippdrv->cmd = cmd;
+	gsc_handle_irq(ctx, true, false, true);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* bypass */
+		break;
+	case IPP_CMD_WB:
+		gsc_set_gscblk_fimd_wb(ctx, enable);
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)enable);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		ret = -EINVAL;
+		dev_err(dev, "invalid operations.\n");
+		return ret;
+	}
+
+	/* scale from the SRC rectangle to the DST rectangle (was DST/DST) */
+	ret = gsc_set_prescaler(ctx, &ctx->sc, &src_config->pos,
+		&dst_config->pos);
+	if (ret) {
+		dev_err(dev, "failed to set prescaler.\n");
+		return ret;
+	}
+
+	gsc_set_scaler(ctx, &ctx->sc);
+
+	cfg = gsc_read(GSC_ENABLE);
+	cfg |= GSC_ENABLE_ON;
+	gsc_write(cfg, GSC_ENABLE);
+
+	return 0;
+}
+
+/*
+ * Stop a running GSC operation: undo the writeback routing for WB
+ * mode, disable interrupts, mask out every output buffer slot and
+ * clear the engine enable bit.
+ */
+static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+	u32 cfg;
+	int enable = 0;
+
+	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
+
+	switch (cmd) {
+	case IPP_CMD_M2M:
+		/* bypass */
+		break;
+	case IPP_CMD_WB:
+		gsc_set_gscblk_fimd_wb(ctx, enable);
+		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)enable);
+		break;
+	case IPP_CMD_OUTPUT:
+	default:
+		dev_err(dev, "invalid operations.\n");
+		break;
+	}
+
+	ippdrv->cmd = IPP_CMD_NONE;
+	gsc_handle_irq(ctx, false, false, true);
+
+	/* reset sequence: all mask bits set means every slot is dequeued */
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
+	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
+
+	cfg = gsc_read(GSC_ENABLE);
+	cfg &= ~GSC_ENABLE_ON;
+	gsc_write(cfg, GSC_ENABLE);
+}
+
+#ifdef CONFIG_SLP_DISP_DEBUG
+/*
+ * Dump the whole register window into @buf for the read_reg sysfs
+ * attribute: four words per line, each line prefixed with its offset.
+ *
+ * NOTE(review): the sprintf()s are unbounded — this relies on the
+ * whole dump fitting into the sysfs PAGE_SIZE buffer; confirm
+ * GSC_MAX_REG is small enough.
+ */
+static int gsc_read_reg(struct gsc_context *ctx, char *buf)
+{
+	u32 cfg;
+	int i;
+	int pos = 0;
+
+	pos += sprintf(buf+pos, "0x%.8x | ", GSC_BASE_REG(ctx->id));
+	for (i = 1; i < GSC_MAX_REG + 1; i++) {
+		cfg = gsc_read((i-1) * sizeof(u32));
+		pos += sprintf(buf+pos, "0x%.8x ", cfg);
+		if (i % 4 == 0)
+			pos += sprintf(buf+pos, "\n0x%.8x | ",
+				GSC_BASE_REG(ctx->id) + (i * sizeof(u32)));
+	}
+
+	pos += sprintf(buf+pos, "\n");
+
+	return pos;
+}
+
+/* sysfs show() hook: dump registers, or -EINVAL before they are mapped. */
+static ssize_t show_read_reg(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	if (!ctx->regs) {
+		dev_err(dev, "failed to get current register.\n");
+		return -EINVAL;
+	}
+
+	return gsc_read_reg(ctx, buf);
+}
+
+/* read-only debug attribute, registered from gsc_probe() */
+static struct device_attribute device_attrs[] = {
+	__ATTR(read_reg, S_IRUGO, show_read_reg, NULL),
+};
+#endif
+
+/*
+ * Bind a GScaler instance and register it with the exynos IPP core.
+ *
+ * Acquires the "gscl" clock, claims and maps the register window,
+ * installs a threaded IRQ handler, wires up the src/dst operation
+ * tables and registers the ippdrv.  All resources are released in
+ * reverse order on failure.
+ */
+static int __devinit gsc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gsc_context *ctx;
+	struct resource *res;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct exynos_drm_gsc_pdata *pdata;
+	int ret = -EINVAL;
+#ifdef CONFIG_SLP_DISP_DEBUG
+	int i;
+#endif
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(dev, "no platform data specified.\n");
+		return -EINVAL;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	/* clock control */
+	ctx->gsc_clk = clk_get(dev, "gscl");
+	if (IS_ERR(ctx->gsc_clk)) {
+		dev_err(dev, "failed to get gsc clock.\n");
+		ret = PTR_ERR(ctx->gsc_clk);
+		goto err_ctx;
+	}
+
+	/* resource memory */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "failed to find registers.\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs_res = request_mem_region(res->start, resource_size(res),
+		dev_name(dev));
+	if (!ctx->regs_res) {
+		dev_err(dev, "failed to claim register region.\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs = ioremap(res->start, resource_size(res));
+	if (!ctx->regs) {
+		dev_err(dev, "failed to map registers.\n");
+		ret = -ENXIO;
+		goto err_req_region;
+	}
+
+	/* resource irq */
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to request irq resource.\n");
+		/* set an explicit code instead of relying on the init value */
+		ret = -ENOENT;
+		goto err_get_regs;
+	}
+
+	ctx->irq = res->start;
+	ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
+		IRQF_ONESHOT, "drm_gsc", ctx);
+	if (ret < 0) {
+		dev_err(dev, "failed to request irq.\n");
+		goto err_get_regs;
+	}
+
+	/* context initialization */
+	ctx->id = pdev->id;
+	ctx->capa = gsc51_capa;
+	if (!ctx->capa) {
+		dev_err(dev, "failed to get capability.\n");
+		/* ret is 0 here after request_threaded_irq(); report an error */
+		ret = -EINVAL;
+		goto err_get_irq;
+	}
+
+#ifdef CONFIG_SLP_DISP_DEBUG
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+		ret = device_create_file(&(pdev->dev),
+					&device_attrs[i]);
+		if (ret)
+			break;
+	}
+
+	/* missing debug attributes are logged but not fatal */
+	if (ret < 0)
+		dev_err(&pdev->dev, "failed to add sysfs entries\n");
+#endif
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+
+	ippdrv = &ctx->ippdrv;
+	ippdrv->dev = dev;
+	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
+	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
+	ippdrv->check_property = gsc_ippdrv_check_property;
+	ippdrv->reset = gsc_ippdrv_reset;
+	ippdrv->start = gsc_ippdrv_start;
+	ippdrv->stop = gsc_ippdrv_stop;
+
+	mutex_init(&ctx->lock);
+	platform_set_drvdata(pdev, ctx);
+
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+
+	ret = exynos_drm_ippdrv_register(ippdrv);
+	if (ret < 0) {
+		dev_err(dev, "failed to register drm gsc device.\n");
+		goto err_pm;
+	}
+
+	dev_info(&pdev->dev, "drm gsc registered successfully.\n");
+
+	return 0;
+
+err_pm:
+	/* undo pm_runtime_enable() so the PM core refcount stays balanced */
+	pm_runtime_disable(dev);
+err_get_irq:
+	free_irq(ctx->irq, ctx);
+err_get_regs:
+	iounmap(ctx->regs);
+err_req_region:
+	release_resource(ctx->regs_res);
+	kfree(ctx->regs_res);
+err_clk:
+	clk_put(ctx->gsc_clk);
+err_ctx:
+	kfree(ctx);
+	return ret;
+}
+
+/*
+ * Unbind: unregister from the IPP core, then release irq, register
+ * mapping, memory region, clock and context in reverse probe order.
+ */
+static int __devexit gsc_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct gsc_context *ctx = get_gsc_context(dev);
+	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
+
+	exynos_drm_ippdrv_unregister(ippdrv);
+
+	pm_runtime_set_suspended(dev);
+	pm_runtime_disable(dev);
+
+	free_irq(ctx->irq, ctx);
+	iounmap(ctx->regs);
+	release_resource(ctx->regs_res);
+	kfree(ctx->regs_res);
+
+	clk_put(ctx->gsc_clk);
+
+	kfree(ctx);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/* System sleep: power down unless runtime PM already suspended us. */
+static int gsc_suspend(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	if (pm_runtime_suspended(dev))
+		return 0;
+	/* ToDo */
+	return gsc_power_on(ctx, false);
+}
+
+/* System resume: power up only if we were not runtime-suspended. */
+static int gsc_resume(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	if (!pm_runtime_suspended(dev))
+		return gsc_power_on(ctx, true);
+	/* ToDo */
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+/* Runtime PM: gate the gsc clock. */
+static int gsc_runtime_suspend(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	/* ToDo */
+	return gsc_power_on(ctx, false);
+}
+
+/* Runtime PM: ungate the gsc clock. */
+static int gsc_runtime_resume(struct device *dev)
+{
+	struct gsc_context *ctx = get_gsc_context(dev);
+
+	/* was __FILE__, which printed the file path, not the function name */
+	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
+	/* ToDo */
+	return gsc_power_on(ctx, true);
+}
+#endif
+
+/* system sleep and runtime PM hooks */
+static const struct dev_pm_ops gsc_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
+	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
+};
+
+/* ToDo: need to check use case platform_device_id */
+/* platform driver glue for the "exynos-drm-gsc" device */
+struct platform_driver gsc_driver = {
+	.probe		= gsc_probe,
+	.remove		= __devexit_p(gsc_remove),
+	.driver		= {
+		.name	= "exynos-drm-gsc",
+		.owner	= THIS_MODULE,
+		.pm	= &gsc_pm_ops,
+	},
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 0000000..546089e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GSC_H_
+#define _EXYNOS_DRM_GSC_H_
+
+/* ToDo */
+
+#endif /* _EXYNOS_DRM_GSC_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index f2ffa68..b0a8e1c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -37,13 +37,13 @@
static DEFINE_MUTEX(iommu_mutex);
struct exynos_iommu_ops {
- int (*setup)(struct device *dev);
- void (*cleanup)(struct device *dev);
- int (*activate)(struct device *dev);
- void (*deactivate)(struct device *dev);
- dma_addr_t (*map)(struct device *dev, struct scatterlist *sg,
+ void *(*setup)(unsigned long s_iova, unsigned long size);
+ void (*cleanup)(void *in_vmm);
+ int (*activate)(void *in_vmm, struct device *dev);
+ void (*deactivate)(void *in_vmm, struct device *dev);
+ dma_addr_t (*map)(void *in_vmm, struct scatterlist *sg,
off_t offset, size_t size);
- void (*unmap)(struct device *dev, dma_addr_t iova);
+ void (*unmap)(void *in_vmm, dma_addr_t iova);
};
static const struct exynos_iommu_ops iommu_ops = {
@@ -55,74 +55,20 @@ static const struct exynos_iommu_ops iommu_ops = {
.unmap = iovmm_unmap
};
-static bool check_iommu_map_params(struct iommu_gem_map_params *params)
-{
- if (!params) {
- DRM_ERROR("params is null.\n");
- return false;
- }
-
- if (!params->dev || !params->drm_dev || !params->file) {
- DRM_ERROR("invalid params.\n");
- return false;
- }
-
- return true;
-}
-
-void exynos_drm_remove_iommu_list(struct list_head *iommu_list,
- void *gem_obj)
-{
- struct iommu_info_node *im, *t_im;
-
- list_for_each_entry_safe(im, t_im, iommu_list, list) {
- if (im->gem_obj == gem_obj) {
- list_del(&im->list);
- kfree(im);
- im = NULL;
- break;
- }
- }
-
-}
-
-dma_addr_t exynos_drm_iommu_map_gem(struct iommu_gem_map_params *params,
- struct list_head *iommu_list,
- unsigned int gem_handle,
- enum iommu_types type)
+dma_addr_t exynos_drm_iommu_map_gem(struct drm_device *drm_dev,
+ struct drm_gem_object *obj)
{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
struct sg_table *sgt;
- struct iommu_info_node *node;
- struct exynos_drm_gem_obj *obj;
- dma_addr_t dma_addr;
-
- if (!is_iommu_type_valid(type)) {
- DRM_ERROR("invalid iommu type.\n");
- return 0;
- }
-
- if (!check_iommu_map_params(params))
- return 0;
-
- /* get gem object from specific gem framework. */
- obj = exynos_drm_gem_get_obj(params->drm_dev, gem_handle,
- params->file);
- if (IS_ERR(obj))
- return 0;
+ dma_addr_t dev_addr;
mutex_lock(&iommu_mutex);
- /*
- * if this gem object had already been mapped to iommu then
- * return dma address mapped before this time.
- */
- if (obj->iommu_info.mapped & (1 << type)) {
- DRM_DEBUG_KMS("already mapped to iommu");
- mutex_unlock(&iommu_mutex);
- return obj->iommu_info.dma_addrs[type];
- }
+ exynos_gem_obj = to_exynos_gem_obj(obj);
- sgt = obj->buffer->sgt;
+ buf = exynos_gem_obj->buffer;
+ sgt = buf->sgt;
/*
* if not using iommu, just return base address to physical
@@ -132,102 +78,64 @@ dma_addr_t exynos_drm_iommu_map_gem(struct iommu_gem_map_params *params,
mutex_unlock(&iommu_mutex);
return sg_dma_address(&sgt->sgl[0]);
}
- mutex_unlock(&iommu_mutex);
/*
- * allocate device address space for this driver and then
- * map all pages contained in sg list to iommu table.
+ * if a gem buffer was already mapped with iommu table then
+ * just return dev_addr;
+ *
+ * Note: device address is unique to system globally.
*/
- dma_addr = iommu_ops.map(params->dev, sgt->sgl, (off_t)0,
- (size_t)obj->size);
- if (!dma_addr) {
+ if (buf->dev_addr) {
mutex_unlock(&iommu_mutex);
- return dma_addr;
+ return buf->dev_addr;
}
- mutex_lock(&iommu_mutex);
-
/*
- * check map flag bit and device address mapped to iommu.
- * this data would be used to avoid duplicated mapping.
- */
- obj->iommu_info.mapped |= (1 << type);
- obj->iommu_info.dma_addrs[type] = dma_addr;
- obj->iommu_info.devs[type] = params->dev;
- obj->iommu_info.iommu_lists[type] = iommu_list;
-
- params->gem_obj = obj;
-
- /*
- * this gem object is referenced by this driver so
- * the object refcount should be increased.
+ * allocate device address space for this driver and then
+ * map all pages contained in sg list to iommu table.
*/
- drm_gem_object_reference(&obj->base);
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- DRM_ERROR("failed to allocate iommu node.\n");
- dma_addr = 0;
- goto err;
+ dev_addr = iommu_ops.map(exynos_gem_obj->vmm, sgt->sgl, (off_t)0,
+ (size_t)obj->size);
+ if (!dev_addr) {
+ mutex_unlock(&iommu_mutex);
+ return dev_addr;
}
- node->gem_obj = obj;
- node->dma_addr = dma_addr;
mutex_unlock(&iommu_mutex);
- list_add_tail(&node->list, iommu_list);
-
- return dma_addr;
-err:
- mutex_unlock(&iommu_mutex);
- iommu_ops.unmap(params->dev, dma_addr);
- return dma_addr;
+ return dev_addr;
}
-void exynos_drm_iommu_unmap_gem(struct iommu_gem_map_params *params,
- dma_addr_t dma_addr,
- enum iommu_types type)
+void exynos_drm_iommu_unmap_gem(struct drm_gem_object *obj)
{
- struct exynos_drm_gem_obj *obj;
-
- if (!iommu_ops.unmap)
- return;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct exynos_drm_gem_buf *buf;
- if (!is_iommu_type_valid(type)) {
- DRM_ERROR("invalid iommu type.\n");
+ if (!iommu_ops.unmap || !obj)
return;
- }
- if (!check_iommu_map_params(params))
- return;
-
- if (!params->gem_obj)
- return;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+ buf = exynos_gem_obj->buffer;
- obj = (struct exynos_drm_gem_obj *)params->gem_obj;
+ /* workaround */
+ usleep_range(15000, 20000);
mutex_lock(&iommu_mutex);
- if (!(obj->iommu_info.mapped & (1 << type))) {
- DRM_DEBUG_KMS("not already mapped to iommu so just return\n");
+
+ if (!buf->dev_addr) {
mutex_unlock(&iommu_mutex);
+ DRM_DEBUG_KMS("not mapped with iommu table.\n");
return;
}
- /* uncheck map flag bit. */
- obj->iommu_info.mapped &= ~(1 << type);
- obj->iommu_info.dma_addrs[type] = 0;
- mutex_unlock(&iommu_mutex);
-
- iommu_ops.unmap(params->dev, dma_addr);
+ if (exynos_gem_obj->vmm)
+ iommu_ops.unmap(exynos_gem_obj->vmm, buf->dev_addr);
- /*
- * drop this gem object refcount to release allocated buffer
- * and resources.
- */
- drm_gem_object_unreference_unlocked(&obj->base);
+ buf->dev_addr = 0;
+ mutex_unlock(&iommu_mutex);
}
-dma_addr_t exynos_drm_iommu_map(struct device *dev, dma_addr_t paddr,
+dma_addr_t exynos_drm_iommu_map(void *in_vmm, dma_addr_t paddr,
size_t size)
{
struct sg_table *sgt;
@@ -244,13 +152,13 @@ dma_addr_t exynos_drm_iommu_map(struct device *dev, dma_addr_t paddr,
sgt = kzalloc(sizeof(struct sg_table) * npages, GFP_KERNEL);
if (!sgt) {
- dev_err(dev, "failed to allocate sg table.\n");
+ DRM_ERROR("failed to allocate sg table.\n");
return dma_addr;
}
ret = sg_alloc_table(sgt, npages, GFP_KERNEL);
if (ret < 0) {
- dev_err(dev, "failed to initialize sg table.\n");
+ DRM_ERROR("failed to initialize sg table.\n");
goto err;
}
@@ -270,9 +178,9 @@ dma_addr_t exynos_drm_iommu_map(struct device *dev, dma_addr_t paddr,
* allocate device address space for this driver and then
* map all pages contained in sg list to iommu table.
*/
- dma_addr = iommu_ops.map(dev, sgt->sgl, (off_t)0, (size_t)size);
+ dma_addr = iommu_ops.map(in_vmm, sgt->sgl, (off_t)0, (size_t)size);
if (!dma_addr)
- dev_err(dev, "failed to map cmdlist pool.\n");
+ DRM_ERROR("failed to map cmdlist pool.\n");
sg_free_table(sgt);
err:
@@ -283,45 +191,45 @@ err:
}
-void exynos_drm_iommu_unmap(struct device *dev, dma_addr_t dma_addr)
+void exynos_drm_iommu_unmap(void *in_vmm, dma_addr_t dma_addr)
{
if (iommu_ops.unmap)
- iommu_ops.unmap(dev, dma_addr);
+ iommu_ops.unmap(in_vmm, dma_addr);
}
-int exynos_drm_iommu_setup(struct device *dev)
+void *exynos_drm_iommu_setup(unsigned long s_iova, unsigned long size)
{
/*
* allocate device address space to this driver and add vmm object
* to s5p_iovmm_list. please know that each iommu will use
* 1GB as its own device address apace.
*
- * the device address space : 0x80000000 ~ 0xA0000000
+ * the device address space : s_iova ~ s_iova + size
*/
if (iommu_ops.setup)
- return iommu_ops.setup(dev);
+ return iommu_ops.setup(s_iova, size);
- return 0;
+ return ERR_PTR(-EINVAL);
}
-int exynos_drm_iommu_activate(struct device *dev)
+int exynos_drm_iommu_activate(void *in_vmm, struct device *dev)
{
if (iommu_ops.activate)
- return iovmm_activate(dev);
+ return iovmm_activate(in_vmm, dev);
return 0;
}
-void exynos_drm_iommu_deactivate(struct device *dev)
+void exynos_drm_iommu_deactivate(void *in_vmm, struct device *dev)
{
if (iommu_ops.deactivate)
- iommu_ops.deactivate(dev);
+ iommu_ops.deactivate(in_vmm, dev);
}
-void exynos_drm_iommu_cleanup(struct device *dev)
+void exynos_drm_iommu_cleanup(void *in_vmm)
{
if (iommu_ops.cleanup)
- iommu_ops.cleanup(dev);
+ iommu_ops.cleanup(in_vmm);
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index aa267ba..35d923d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -26,54 +26,33 @@
#ifndef _EXYNOS_DRM_IOMMU_H_
#define _EXYNOS_DRM_IOMMU_H_
-enum iommu_types {
- IOMMU_FIMD = 0,
- IOMMU_HDMI,
- IOMMU_G2D,
- IOMMU_FIMC,
- IOMMU_G3D,
- IOMMU_ROTATOR,
- IOMMU_MASK = 0x3f
+struct exynos_iommu_gem_data {
+ unsigned int gem_handle_in;
+ void *gem_obj_out;
};
-struct iommu_gem_map_params {
- struct device *dev;
- struct drm_device *drm_dev;
- struct drm_file *file;
- void *gem_obj;
-};
-
-#define is_iommu_type_valid(t) (((1 << (t)) & ~(IOMMU_MASK)) ? false : true)
-
-void exynos_drm_remove_iommu_list(struct list_head *iommu_list,
- void *gem_obj);
-
/* get all pages to gem object and map them to iommu table. */
-dma_addr_t exynos_drm_iommu_map_gem(struct iommu_gem_map_params *params,
- struct list_head *iommu_list,
- unsigned int gem_handle,
- enum iommu_types type);
+dma_addr_t exynos_drm_iommu_map_gem(struct drm_device *drm_dev,
+ struct drm_gem_object *obj);
/* unmap device address space to gem object from iommu table. */
-void exynos_drm_iommu_unmap_gem(struct iommu_gem_map_params *params,
- dma_addr_t dma_addr,
- enum iommu_types type);
+void exynos_drm_iommu_unmap_gem(struct drm_gem_object *obj);
/* map physical memory region pointed by paddr to iommu table. */
-dma_addr_t exynos_drm_iommu_map(struct device *dev, dma_addr_t paddr,
+dma_addr_t exynos_drm_iommu_map(void *in_vmm, dma_addr_t paddr,
size_t size);
-/* unmap device address space pointed by dma_addr from iommu table. */
-void exynos_drm_iommu_unmap(struct device *dev, dma_addr_t dma_addr);
+/* unmap device address space pointed by dev_addr from iommu table. */
+void exynos_drm_iommu_unmap(void *in_vmm, dma_addr_t dev_addr);
/* setup device address space for device iommu. */
-int exynos_drm_iommu_setup(struct device *dev);
+void *exynos_drm_iommu_setup(unsigned long s_iova, unsigned long size);
-int exynos_drm_iommu_activate(struct device *dev);
+int exynos_drm_iommu_activate(void *in_vmm, struct device *dev);
-void exynos_drm_iommu_deactivate(struct device *dev);
+void exynos_drm_iommu_deactivate(void *in_vmm, struct device *dev);
/* clean up allocated device address space for device iommu. */
-void exynos_drm_iommu_cleanup(struct device *dev);
+void exynos_drm_iommu_cleanup(void *in_vmm);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 0000000..0293fbf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,1638 @@
+/*
+ * Copyright (C) 2012 Samsung Electronics Co.Ltd
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include "drmP.h"
+#include "drm_backlight.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/cma.h>
+#include <plat/map-base.h>
+
+#include <drm/exynos_drm.h>
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
+#include "exynos_drm_ipp.h"
+
+/*
+ * IPP is stand for Image Post Processing and
+ * supports image scaler/rotator and input/output DMA operations.
+ * using FIMC, GSC, Rotator, so on.
+ * IPP is integration device driver of same attribute h/w
+ */
+
+#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
+
+/*
+ * A structure of event.
+ *
+ * @base: base of event.
+ * @event: ipp event.
+ */
+struct drm_exynos_ipp_send_event {
+ struct drm_pending_event base;
+ struct drm_exynos_ipp_event event;
+};
+
+/*
+ * A structure of command node.
+ *
+ * @list: list head to command queue information.
+ * @mem_list: list head to source,destination memory queue information.
+ * @property: property information.
+ */
+struct drm_exynos_ipp_cmd_node {
+ struct list_head list;
+ struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_property property;
+};
+
+/*
+ * A structure of memory node.
+ *
+ * @list: list head to memory queue information.
+ * @ops_id: id of operations.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @buf_info: gem objects and dma address, size.
+ */
+struct drm_exynos_ipp_mem_node {
+ struct list_head list;
+ enum drm_exynos_ops_id ops_id;
+ u32 prop_id;
+ u32 buf_id;
+ struct drm_exynos_ipp_buf_info buf_info;
+};
+
+/*
+ * A structure of ipp context.
+ *
+ * @subdrv: prepare initialization using subdrv.
+ * @lock: locking of operations.
+ * @ipp_idr: ipp driver idr.
+ * @sched_event: schdule event list
+ * @sched_cmd: schdule command list
+ */
+struct ipp_context {
+ struct exynos_drm_subdrv subdrv;
+ struct mutex lock;
+ struct idr ipp_idr;
+ struct work_struct sched_event;
+ struct work_struct sched_cmd;
+};
+
+static LIST_HEAD(exynos_drm_ippdrv_list);
+static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
+
+int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ list_add_tail(&ippdrv->list, &exynos_drm_ippdrv_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_register);
+
+int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ippdrv)
+ return -EINVAL;
+
+ list_del(&ippdrv->list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ippdrv_unregister);
+
+int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct exynos_drm_ippdrv *ippdrv;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ /* get ipp driver entry */
+ list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, list) {
+ /* check idle state and dedicated state */
+ if (ippdrv->state == IPP_STATE_START &&
+ ippdrv->dedicated)
+ continue;
+
+ /* ToDo: get property */
+ DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ippdrv->ipp_id);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ipp_get_property);
+
+static int ipp_create_id(struct idr *id_idr, void *obj, u32 *idp)
+{
+ int ret = -EINVAL;
+
+ /* ToDo: need spin_lock ? */
+
+again:
+ /* ensure there is space available to allocate a handle */
+ if (idr_pre_get(id_idr, GFP_KERNEL) == 0)
+ return -ENOMEM;
+
+ ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
+ if (ret == -EAGAIN)
+ goto again;
+
+ return ret;
+}
+
+static void *ipp_find_id(struct idr *id_idr, u32 id)
+{
+ void *obj;
+
+ /* ToDo: need spin_lock ? */
+
+ /* find object using handle */
+ obj = idr_find(id_idr, id);
+ if (obj == NULL)
+ return NULL;
+
+ return obj;
+}
+
+static struct exynos_drm_ippdrv
+	*ipp_find_driver(struct ipp_context *ctx,
+		struct drm_exynos_ipp_property *property)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	u32 ipp_id = property->ipp_id;
+
+	DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
+
+	if (ipp_id) {
+		/* find ipp driver by id (not via the uninitialized pointer) */
+		ippdrv = ipp_find_id(&ctx->ipp_idr, ipp_id);
+		if (!ippdrv) {
+			DRM_ERROR("not found ipp%d driver.\n", ipp_id);
+			return NULL;
+		}
+
+		/* check idle state and dedicated state */
+		if (ippdrv->state == IPP_STATE_START &&
+		    ippdrv->dedicated) {
+			DRM_ERROR("used choose device.\n");
+			return NULL;
+		}
+
+		/* check property */
+		if (ippdrv->check_property &&
+		    ippdrv->check_property(ippdrv->dev, property)) {
+			DRM_ERROR("not support property.\n");
+			return NULL;
+		}
+
+		return ippdrv;
+	} else {
+		/* get ipp driver entry */
+		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, list) {
+			/* skip busy dedicated drivers (match get_property) */
+			if (ippdrv->state == IPP_STATE_START &&
+			    ippdrv->dedicated)
+				continue;
+
+			/* check property */
+			if (ippdrv->check_property &&
+			    ippdrv->check_property(ippdrv->dev, property)) {
+				DRM_DEBUG_KMS("not support property.\n");
+				continue;
+			}
+
+			return ippdrv;
+		}
+
+		DRM_ERROR("not support ipp driver operations.\n");
+	}
+
+	return NULL;
+}
+
+int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct device *dev = priv->dev;
+ struct ipp_context *ctx = get_ipp_context(dev);
+ struct exynos_drm_ippdrv *ippdrv;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_property *property = data;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctx) {
+ DRM_ERROR("invalid context.\n");
+ return -EINVAL;
+ }
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ /* find ipp driver using ipp id */
+ ippdrv = ipp_find_driver(ctx, property);
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ /* allocate command node */
+ c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
+ if (!c_node) {
+ DRM_ERROR("failed to allocate map node.\n");
+ return -ENOMEM;
+ }
+
+ /* create property id */
+ ret = ipp_create_id(&ippdrv->prop_idr, c_node, &property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to create id.\n");
+ goto err_clear;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* stored property information and ippdrv in private data */
+ c_node->property = *property;
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++)
+ INIT_LIST_HEAD(&c_node->mem_list[i]);
+
+ /* make dedicated state without m2m */
+ if (property->cmd != IPP_CMD_M2M)
+ ippdrv->dedicated = true;
+ priv->ippdrv = ippdrv;
+
+ list_add_tail(&c_node->list, &ippdrv->cmd_list);
+
+ return 0;
+
+err_clear:
+ kfree(c_node);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ipp_set_property);
+
+static struct drm_exynos_ipp_cmd_node
+ *ipp_find_cmd_node(struct exynos_drm_ippdrv *ippdrv, u32 prop_id)
+{
+ struct drm_exynos_ipp_cmd_node *c_node;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ /* ToDo: same with find_cmd_node and find_id */
+
+ /* find ipp driver */
+ c_node = ipp_find_id(&ippdrv->prop_idr, prop_id);
+ if (!c_node) {
+ DRM_ERROR("not found property%d.\n", prop_id);
+ return NULL;
+ }
+
+ return c_node;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
+ struct drm_exynos_ipp_buf *buf)
+{
+ struct drm_exynos_ipp_mem_node *m_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf->buf_id);
+
+ /* find memory node entry */
+ list_for_each_entry(m_node, &c_node->mem_list[buf->ops_id], list) {
+ DRM_DEBUG_KMS("%s:count[%d]c_node[0x%x]\n",
+ __func__, count++, (int)c_node);
+
+ /* compare buffer id */
+ if (m_node->buf_id == buf->buf_id)
+ return m_node;
+ }
+
+ return NULL;
+}
+
+static struct drm_exynos_ipp_property
+ *ipp_find_property(struct exynos_drm_ippdrv *ippdrv, u32 prop_id)
+{
+ struct drm_exynos_ipp_property *property;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ int count = 0;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ /* find command node entry */
+ list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("%s:count[%d]c_node[0x%x]\n",
+ __func__, count++, (int)c_node);
+
+ property = &c_node->property;
+ /* compare property id */
+ if (property->prop_id == prop_id)
+ return property;
+ }
+
+ return NULL;
+}
+
+static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret, i, swap = 0;
+
+ if (!property) {
+ DRM_ERROR("invalid property parameter.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
+
+ /* reset h/w block */
+ if (ippdrv->reset &&
+ ippdrv->reset(ippdrv->dev)) {
+ DRM_ERROR("failed to reset.\n");
+ return -EINVAL;
+ }
+
+ /* set source,destination operations */
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+ /* ToDo: integrate property and config */
+ struct drm_exynos_ipp_config *config =
+ &property->config[i];
+
+ ops = ippdrv->ops[i];
+ if (!ops || !config) {
+ DRM_ERROR("not support ops and config.\n");
+ return -EINVAL;
+ }
+
+ /* set format */
+ if (ops->set_fmt) {
+ ret = ops->set_fmt(ippdrv->dev, config->fmt);
+ if (ret) {
+ DRM_ERROR("not support format.\n");
+ return ret;
+ }
+ }
+
+ /* set transform for rotation, flip */
+ if (ops->set_transf) {
+ swap = ops->set_transf(ippdrv->dev, config->degree,
+ config->flip);
+ if (swap < 0) {
+ DRM_ERROR("not support tranf.\n");
+ return -EINVAL;
+ }
+ }
+
+ /* set size */
+ if (ops->set_size) {
+ ret = ops->set_size(ippdrv->dev, swap, &config->pos,
+ &config->sz);
+ if (ret) {
+ DRM_ERROR("not support size.\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_mem_node *node)
+{
+ struct exynos_drm_ipp_ops *ops = NULL;
+ int ret;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)node);
+
+ if (!node) {
+ DRM_ERROR("invalid queue node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, node->ops_id);
+
+ /* get operations callback */
+ ops = ippdrv->ops[node->ops_id];
+ if (!ops) {
+ DRM_DEBUG_KMS("not support ops.\n");
+ ret = -EIO;
+ return ret;
+ }
+
+ /* set address and enable irq */
+ if (ops->set_addr) {
+ ret = ops->set_addr(ippdrv->dev, &node->buf_info,
+ node->buf_id, IPP_BUF_CTRL_QUEUE);
+ if (ret) {
+ if (ret != -ENOMEM)
+ DRM_ERROR("failed to set addr.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_free_mem_node(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_mem_node *node)
+{
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)node);
+
+ if (!node) {
+ DRM_ERROR("invalid queue node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, node->ops_id);
+
+ /* put gem buffer */
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++) {
+ void *gem_obj = node->buf_info.gem_objs[i];
+
+ if (gem_obj)
+ exynos_drm_gem_put_dma_addr(drm_dev, gem_obj);
+ }
+
+ /* delete list in queue */
+ list_del(&node->list);
+ kfree(node);
+
+ return 0;
+}
+
+/* ToDo: Merge with stop property */
+static void ipp_free_cmd_list(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv)
+{
+ struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct list_head *head;
+ int ret, i, count = 0;
+
+ /* get command node entry */
+ list_for_each_entry_safe(c_node, tc_node,
+ &ippdrv->cmd_list, list) {
+ DRM_DEBUG_KMS("%s:count[%d]c_node[0x%x]\n",
+ __func__, count++, (int)c_node);
+
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ /* get memory node entry */
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ /* free memory node to ippdrv */
+ ret = ipp_free_mem_node(drm_dev, ippdrv,
+ m_node);
+ if (ret)
+ DRM_ERROR("failed to free m node.\n");
+ }
+ }
+
+ /* delete list */
+ list_del(&c_node->list);
+ kfree(c_node);
+ }
+
+ return;
+}
+
+static int ipp_start_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv, u32 prop_id)
+{
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node, tm_node;
+ struct drm_exynos_ipp_property *property;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ /* find command node */
+ c_node = ipp_find_cmd_node(ippdrv, prop_id);
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return -EINVAL;
+ }
+
+ /* get property */
+ property = &c_node->property;
+ if (property->prop_id != prop_id) {
+ DRM_ERROR("invalid property id.\n");
+ return -EINVAL;
+ }
+
+ /* set current property in ippdrv */
+ ippdrv->property = property;
+ ret = ipp_set_property(ippdrv, property);
+ if (ret) {
+ DRM_ERROR("failed to set property.\n");
+ ippdrv->property = NULL;
+ return ret;
+ }
+
+ /* check command type */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_DEBUG_KMS("bypass empty list.\n");
+ return 0;
+ }
+
+ /* get first entry */
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_DEBUG_KMS("failed to get node.\n");
+ ret = -EFAULT;
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
+ __func__, (int)m_node);
+
+ /* must be set 0 src buffer id in m2m */
+ if (i == EXYNOS_DRM_OPS_SRC) {
+ tm_node = *m_node;
+ tm_node.buf_id = 0;
+ m_node = &tm_node;
+ }
+
+ /* set memory node to ippdrv */
+ ret = ipp_set_mem_node(ippdrv, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ /* get list entry */
+ list_for_each_entry(m_node, head, list) {
+ /* set memory node to ippdrv */
+ ret = ipp_set_mem_node(ippdrv, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ /* get list entry */
+ list_for_each_entry(m_node, head, list) {
+ /* set memory node to ippdrv */
+ ret = ipp_set_mem_node(ippdrv, m_node);
+ if (ret) {
+ DRM_ERROR("failed to set m node.\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ /* start operations */
+ if (ippdrv->start) {
+ ret = ippdrv->start(ippdrv->dev, property->cmd);
+ if (ret) {
+ DRM_ERROR("failed to start ops.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ipp_stop_property(struct drm_device *drm_dev,
+ struct exynos_drm_ippdrv *ippdrv, u32 prop_id)
+{
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node, *tm_node;
+ struct drm_exynos_ipp_property *property;
+ enum drm_exynos_ipp_cmd cmd;
+ struct list_head *head;
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
+
+ /* find command node */
+ c_node = ipp_find_cmd_node(ippdrv, prop_id);
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return -EINVAL;
+ }
+
+ /* get property */
+ property = &c_node->property;
+ if (property->prop_id != prop_id) {
+ DRM_ERROR("invalid property id.\n");
+ return -EINVAL;
+ }
+
+ /* copy current command for memory list */
+ cmd = property->cmd;
+
+ /* stop operations */
+ if (ippdrv->stop)
+ ippdrv->stop(ippdrv->dev, property->cmd);
+
+ /* check command type */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ /* get list entry */
+ list_for_each_entry_safe(m_node, tm_node,
+ head, list) {
+ /* free memory node to ippdrv */
+ ret = ipp_free_mem_node(drm_dev, ippdrv,
+ m_node);
+ if (ret) {
+ DRM_ERROR("failed to free m node.\n");
+ return ret;
+ }
+ }
+ }
+ break;
+ case IPP_CMD_WB:
+ /* destination memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
+
+ /* get list entry */
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ /* free memory node to ippdrv */
+ ret = ipp_free_mem_node(drm_dev, ippdrv, m_node);
+ if (ret) {
+ DRM_ERROR("failed to free m node.\n");
+ return ret;
+ }
+ }
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ /* get list entry */
+ list_for_each_entry_safe(m_node, tm_node, head, list) {
+ /* free memory node to ippdrv */
+ ret = ipp_free_mem_node(drm_dev, ippdrv, m_node);
+ if (ret) {
+ DRM_ERROR("failed to free m node.\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ ret = -EINVAL;
+ return ret;
+ }
+
+ /* delete list */
+ list_del(&c_node->list);
+ kfree(c_node);
+
+ return 0;
+}
+
+static void ipp_free_event(struct drm_pending_event *event)
+{
+ kfree(event);
+}
+
+static int ipp_make_event(struct drm_device *drm_dev, struct drm_file *file,
+ struct exynos_drm_ippdrv *ippdrv, struct drm_exynos_ipp_buf *buf)
+{
+ struct drm_exynos_ipp_send_event *e;
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
+ buf->ops_id, buf->buf_id);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ DRM_ERROR("failed to allocate event.\n");
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ file->event_space += sizeof(e->event);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ return -ENOMEM;
+ }
+
+ DRM_DEBUG_KMS("%s:e[0x%x]\n", __func__, (int)e);
+
+ /* make event */
+ e->event.base.type = DRM_EXYNOS_IPP_EVENT;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = buf->user_data;
+ e->event.buf_id[EXYNOS_DRM_OPS_DST] = buf->buf_id;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = ipp_free_event;
+
+ list_add_tail(&e->base.link, &ippdrv->event_list);
+
+ return 0;
+}
+
+static struct drm_exynos_ipp_mem_node
+ *ipp_make_mem_node(struct drm_device *drm_dev,
+ struct drm_file *file,
+ struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_buf *buf)
+{
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf_info buf_info;
+ void *addr;
+ unsigned long size;
+ int i;
+
+ m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
+ if (!m_node) {
+ DRM_ERROR("failed to allocate queue node.\n");
+ return NULL;
+ }
+
+ /* clear base address for error handling */
+ memset(&buf_info, 0x0, sizeof(buf_info));
+
+ /* find command node */
+ c_node = ipp_find_cmd_node(ippdrv, buf->prop_id);
+ if (!c_node) {
+ DRM_ERROR("failed to get command node.\n");
+ goto err_clear;
+ }
+
+ /* operations, buffer id */
+ m_node->ops_id = buf->ops_id;
+ m_node->prop_id = buf->prop_id;
+ m_node->buf_id = buf->buf_id;
+
+ DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
+ (int)m_node, buf->ops_id);
+ DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
+ buf->prop_id, m_node->buf_id);
+
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++) {
+ unsigned int gem_obj;
+
+ DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
+ i, buf->handle[i]);
+
+ /* get dma address by handle */
+ if (buf->handle[i] != 0) {
+ addr = exynos_drm_gem_get_dma_addr(drm_dev,
+ buf->handle[i], file, &gem_obj);
+ if (!addr) {
+ DRM_ERROR("failed to get addr.\n");
+ goto err_clear;
+ }
+
+ size = exynos_drm_gem_get_size(drm_dev,
+ buf->handle[i], file);
+ if (!size) {
+ DRM_ERROR("failed to get size.\n");
+ goto err_clear;
+ }
+
+ buf_info.gem_objs[i] = (void *)gem_obj;
+ buf_info.base[i] = *(dma_addr_t *) addr;
+ buf_info.size[i] = (uint64_t) size;
+ }
+ }
+
+ m_node->buf_info = buf_info;
+ list_add_tail(&m_node->list, &c_node->mem_list[buf->ops_id]);
+
+ return m_node;
+
+err_clear:
+ kfree(m_node);
+
+ return NULL;
+}
+
+int exynos_drm_ipp_buf(struct drm_device *drm_dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct exynos_drm_ippdrv *ippdrv = priv->ippdrv;
+	struct drm_exynos_ipp_buf *buf = data;
+	struct exynos_drm_ipp_ops *ops = NULL;
+	struct drm_exynos_ipp_send_event *e, *te;
+	struct drm_exynos_ipp_cmd_node *c_node;
+	struct drm_exynos_ipp_mem_node *m_node = NULL, *tm_node;
+	struct drm_exynos_ipp_property *property;
+	struct drm_exynos_ipp_buf_info buf_info;
+	struct list_head *head;
+	int ret = -EINVAL, i;	/* default for gotos that skip "ret = ..." */
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	if (!buf) {
+		DRM_ERROR("invalid buf parameter.\n");
+		return -EINVAL;
+	}
+
+	if (!ippdrv) {
+		DRM_ERROR("failed to get ipp driver.\n");
+		return -EINVAL;
+	}
+
+	if (buf->ops_id >= EXYNOS_DRM_OPS_MAX) {
+		DRM_ERROR("invalid ops parameter.\n");
+		return -EINVAL;
+	}
+
+	ops = ippdrv->ops[buf->ops_id];
+	if (!ops) {
+		DRM_ERROR("failed to get ops.\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_KMS("%s:ops_id[%s]buf_id[%d]buf_ctrl[%d]\n",
+		__func__, buf->ops_id ? "dst" : "src",
+		buf->buf_id, buf->buf_ctrl);
+
+	/* clear base address for error handling */
+	memset(&buf_info, 0x0, sizeof(buf_info));
+
+	/* find command node */
+	c_node = ipp_find_cmd_node(ippdrv, buf->prop_id);
+	if (!c_node) {
+		DRM_ERROR("failed to get command node.\n");
+		/* err_clear path would dereference NULL c_node */
+		return -EINVAL;
+	}
+
+	/* get property */
+	property = &c_node->property;
+	if (!property) {
+		DRM_ERROR("invalid property parameter.\n");
+		goto err_clear;
+	}
+
+	/* buffer control */
+	switch (buf->buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+		/* make memory node */
+		m_node = ipp_make_mem_node(drm_dev, file, ippdrv, buf);
+		if (!m_node) {
+			DRM_ERROR("failed to make queue node.\n");
+			ret = -EINVAL;
+			goto err_clear;
+		}
+
+		buf_info = m_node->buf_info;
+
+		if (pm_runtime_suspended(ippdrv->dev))
+			break;
+
+		/* set address */
+		if (property->cmd != IPP_CMD_M2M && ops->set_addr) {
+			ret = ops->set_addr(ippdrv->dev, &buf_info, buf->buf_id,
+				buf->buf_ctrl);
+			if (ret) {
+				DRM_ERROR("failed to set addr.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	case IPP_BUF_CTRL_DEQUEUE:
+		/* free node */
+		list_for_each_entry_safe(m_node, tm_node,
+			&c_node->mem_list[buf->ops_id], list) {
+			if (m_node->buf_id == buf->buf_id &&
+			    m_node->ops_id == buf->ops_id) {
+				/* free memory node to ippdrv */
+				ret = ipp_free_mem_node(drm_dev, ippdrv,
+					m_node);
+				if (ret) {
+					DRM_ERROR("failed to free m node.\n");
+					goto err_clear;
+				}
+			}
+		}
+
+		if (pm_runtime_suspended(ippdrv->dev)) {
+			DRM_ERROR("suspended:invalid operations.\n");
+			ret = -EINVAL;
+			goto err_clear;
+		}
+
+		/* clear address */
+		if (ops->set_addr) {
+			ret = ops->set_addr(ippdrv->dev, &buf_info, buf->buf_id,
+				buf->buf_ctrl);
+			if (ret) {
+				DRM_ERROR("failed to set addr.\n");
+				goto err_clear;
+			}
+		}
+		break;
+	default:
+		DRM_ERROR("invalid buffer control.\n");
+		return -EINVAL;
+	}
+
+	/* destination buffer need event control */
+	if (buf->ops_id == EXYNOS_DRM_OPS_DST) {
+		switch (buf->buf_ctrl) {
+		case IPP_BUF_CTRL_QUEUE:
+			/* make event */
+			ret = ipp_make_event(drm_dev, file, ippdrv, buf);
+			if (ret) {
+				DRM_ERROR("failed to make event.\n");
+				goto err_clear;
+			}
+			break;
+		case IPP_BUF_CTRL_DEQUEUE:
+			/* free event */
+			list_for_each_entry_safe(e, te,
+				&ippdrv->event_list, base.link) {
+				if (e->event.buf_id[EXYNOS_DRM_OPS_DST] ==
+				    buf->buf_id) {
+					/* delete list */
+					list_del(&e->base.link);
+					kfree(e);
+				}
+			}
+			break;
+		default:
+			/* no action */
+			break;
+		}
+	}
+
+	/*
+	 * If set source, destination buffer and enable pm
+	 * m2m operations need start operations in queue
+	 */
+	if (property->cmd == IPP_CMD_M2M) {
+		/* start operations was not set */
+		if (pm_runtime_suspended(ippdrv->dev)) {
+			DRM_DEBUG_KMS("suspended state.\n");
+			return 0;
+		}
+
+		/* check source/destination buffer status */
+		for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+			/* source/destination memory list */
+			head = &c_node->mem_list[i];
+
+			/* check list empty */
+			if (list_empty(head)) {
+				DRM_DEBUG_KMS("list empty.\n");
+				return 0;
+			}
+		}
+
+		/* check property id and buffer property id */
+		if (property->prop_id != buf->prop_id) {
+			DRM_ERROR("invalid property id.\n");
+			goto err_clear;
+		}
+
+		/* start property */
+		ret = ipp_start_property(drm_dev, ippdrv, property->prop_id);
+		if (ret) {
+			DRM_ERROR("failed to start property.\n");
+			goto err_clear;
+		}
+	}
+
+	return 0;
+
+err_clear:
+	DRM_ERROR("%s:failed to set buf.\n", __func__);
+
+	/* delete list */
+	list_for_each_entry_safe(m_node, tm_node,
+		&c_node->mem_list[buf->ops_id], list) {
+		if (m_node->buf_id == buf->buf_id &&
+		    m_node->ops_id == buf->ops_id) {
+			list_del(&m_node->list);
+			kfree(m_node);
+		}
+	}
+
+	/* put gem buffer */
+	for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++) {
+		void *gem_obj = buf_info.gem_objs[i];
+
+		if (gem_obj)
+			exynos_drm_gem_put_dma_addr(drm_dev,
+				gem_obj);
+	}
+
+	/* free address */
+	switch (buf->buf_ctrl) {
+	case IPP_BUF_CTRL_QUEUE:
+	case IPP_BUF_CTRL_DEQUEUE:
+		if (pm_runtime_suspended(ippdrv->dev)) {
+			DRM_ERROR("suspended:invalid error operations.\n");
+			return -EINVAL;
+		}
+
+		/* clear base address for error handling */
+		memset(&buf_info, 0x0, sizeof(buf_info));
+
+		/* don't need check error case */
+		if (ops->set_addr)
+			ops->set_addr(ippdrv->dev, &buf_info,
+				buf->buf_id, IPP_BUF_CTRL_DEQUEUE);
+		break;
+	default:
+		/* no action */
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ipp_buf);
+
+int exynos_drm_ipp_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+ struct exynos_drm_ippdrv *ippdrv = priv->ippdrv;
+ struct drm_exynos_ipp_ctrl *ctrl = data;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ if (!ctrl) {
+ DRM_ERROR("invalid control parameter.\n");
+ return -EINVAL;
+ }
+
+ if (!ippdrv) {
+ DRM_ERROR("failed to get ipp driver.\n");
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_KMS("%s:use[%d]\n", __func__, ctrl->use);
+
+ /* ToDo: expand ctrl operation */
+
+ /*
+ * start/stop operations,
+ * set use to 1, you can use start operations
+ * other case is stop opertions
+ */
+ if (ctrl->use) {
+ if (pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_get_sync(ippdrv->dev);
+
+ ret = ipp_start_property(drm_dev, ippdrv, ctrl->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to start property.\n");
+ goto err_clear;
+ }
+
+ ippdrv->state = IPP_STATE_START;
+ } else {
+ ippdrv->state = IPP_STATE_STOP;
+ ippdrv->dedicated = false;
+ ippdrv->property = NULL;
+
+ ret = ipp_stop_property(drm_dev, ippdrv, ctrl->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to stop property.\n");
+ goto err_clear;
+ }
+
+ if (!pm_runtime_suspended(ippdrv->dev))
+ pm_runtime_put_sync(ippdrv->dev);
+ }
+
+ return 0;
+
+err_clear:
+ /*
+ * ToDo: register clear if needed
+ * If failed choose device using property. then
+ * revert register clearing if needed
+ */
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ipp_ctrl);
+
+int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(
+ &exynos_drm_ippnb_list, nb);
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ippnb_register);
+
+int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(
+ &exynos_drm_ippnb_list, nb);
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ippnb_unregister);
+
+int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return blocking_notifier_call_chain(
+ &exynos_drm_ippnb_list, val, v);
+}
+EXPORT_SYMBOL_GPL(exynos_drm_ippnb_send_event);
+
+void ipp_send_event_handler(struct exynos_drm_ippdrv *ippdrv,
+ int buf_id)
+{
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct drm_exynos_ipp_property *property = ippdrv->property;
+ struct drm_exynos_ipp_cmd_node *c_node;
+ struct drm_exynos_ipp_mem_node *m_node;
+ struct drm_exynos_ipp_buf buf;
+ struct drm_exynos_ipp_send_event *e;
+ struct list_head *head;
+ struct timeval now;
+ unsigned long flags;
+ u32 q_buf_id[EXYNOS_DRM_OPS_MAX] = {0, };
+ int ret, i;
+
+ DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
+
+ if (!drm_dev) {
+ DRM_ERROR("failed to get drm_dev.\n");
+ return;
+ }
+
+ if (list_empty(&ippdrv->event_list)) {
+ DRM_ERROR("event list is empty.\n");
+ return;
+ }
+
+ if (!property) {
+ DRM_ERROR("failed to get property.\n");
+ return;
+ }
+
+ /* find command node */
+ c_node = ipp_find_cmd_node(ippdrv, property->prop_id);
+ if (!c_node) {
+ DRM_ERROR("invalid command node list.\n");
+ return;
+ }
+
+ /* check command type */
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+ /* source/destination memory list */
+ head = &c_node->mem_list[i];
+
+ if (list_empty(head)) {
+ DRM_ERROR("empty list.\n");
+ return;
+ }
+
+ /* get first entry */
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty list.\n");
+ return;
+ }
+
+ q_buf_id[i] = m_node->buf_id;
+
+ /* free memory node to ippdrv */
+ ret = ipp_free_mem_node(drm_dev, ippdrv, m_node);
+ if (ret)
+ DRM_ERROR("failed to free m node.\n");
+ }
+ break;
+ case IPP_CMD_WB:
+ /* clear buf for finding */
+ memset(&buf, 0x0, sizeof(buf));
+ buf.ops_id = EXYNOS_DRM_OPS_DST;
+ buf.buf_id = buf_id;
+
+ /* get memory node entry */
+ m_node = ipp_find_mem_node(c_node, &buf);
+ if (!m_node) {
+ DRM_ERROR("empty list.\n");
+ return;
+ }
+
+ q_buf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
+
+ /* free memory node to ippdrv */
+ ret = ipp_free_mem_node(drm_dev, ippdrv, m_node);
+ if (ret)
+ DRM_ERROR("failed to free m node.\n");
+ break;
+ case IPP_CMD_OUTPUT:
+ /* source memory list */
+ head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
+
+ /* get first entry */
+ m_node = list_first_entry(head,
+ struct drm_exynos_ipp_mem_node, list);
+ if (!m_node) {
+ DRM_ERROR("empty list.\n");
+ return;
+ }
+
+ q_buf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
+
+ /* free memory node to ippdrv */
+ ret = ipp_free_mem_node(drm_dev, ippdrv, m_node);
+ if (ret)
+ DRM_ERROR("failed to free m node.\n");
+ break;
+ default:
+ DRM_ERROR("invalid operations.\n");
+ return;
+ }
+
+ /* ToDo: Fix buffer id */
+ if (q_buf_id[EXYNOS_DRM_OPS_DST] != buf_id)
+ DRM_ERROR("failed to match buffer id %d, %d.\n",
+ q_buf_id[EXYNOS_DRM_OPS_DST], buf_id);
+
+ /* get first event entry */
+ e = list_first_entry(&ippdrv->event_list,
+ struct drm_exynos_ipp_send_event, base.link);
+
+ do_gettimeofday(&now);
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+ e->event.prop_id = property->prop_id;
+
+ /* set buffer id about source destination */
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+ /* ToDo: compare index. If needed */
+ e->event.buf_id[i] = q_buf_id[i];
+ }
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ /* ToDo: Need to handle the property queue */
+
+ switch (property->cmd) {
+ case IPP_CMD_M2M:
+ for (i = 0; i < EXYNOS_DRM_OPS_MAX; i++) {
+ head = &c_node->mem_list[i];
+ if (list_empty(head))
+ return;
+ }
+
+ ret = ipp_start_property(drm_dev, ippdrv, property->prop_id);
+ if (ret) {
+ DRM_ERROR("failed to start property.\n");
+ return;
+ }
+ break;
+ case IPP_CMD_WB:
+ case IPP_CMD_OUTPUT:
+ default:
+ break;
+ }
+
+ DRM_DEBUG_KMS("%s:finish cmd[%d]\n", __func__, property->cmd);
+}
+
+/*
+ * Workqueue handler for deferred IPP event delivery.
+ *
+ * Currently a stub: it only resolves the owning ipp_context from the
+ * embedded work_struct and logs entry; actual event dispatch is still
+ * a ToDo. 'ctx' is intentionally unused until the handler is filled in.
+ */
+static void ipp_sched_event(struct work_struct *sched_event)
+{
+	struct ipp_context *ctx = container_of(sched_event,
+		struct ipp_context, sched_event);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+	/* ToDo:send event handler */
+}
+
+/*
+ * Workqueue handler for deferred IPP command scheduling.
+ *
+ * Currently a stub: it only resolves the owning ipp_context from the
+ * embedded work_struct and logs entry; scheduling of the next queued
+ * command is still a ToDo. 'ctx' is unused until then.
+ */
+static void ipp_sched_cmd(struct work_struct *sched_cmd)
+{
+	struct ipp_context *ctx = container_of(sched_cmd,
+		struct ipp_context, sched_cmd);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+	/* ToDo: schedule next work */
+}
+
+/*
+ * Sub-driver probe callback: binds every registered ippdrv to the drm
+ * device, initializes its property idr and, for drivers that need it,
+ * activates the iommu mapping.
+ *
+ * Returns 0 on success or the iommu activation error; on failure all
+ * drivers are walked in reverse and their iommu mapping is deactivated.
+ */
+static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct exynos_drm_private *drm_priv = drm_dev->dev_private;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, list) {
+		ippdrv->drm_dev = drm_dev;
+
+		/* ToDo: need move open ? */
+
+		/* init prop idr */
+		idr_init(&ippdrv->prop_idr);
+
+		/* check iommu use case */
+		if (ippdrv->iommu_used) {
+			ret = exynos_drm_iommu_activate(drm_priv->vmm,
+				ippdrv->dev);
+			if (ret) {
+				DRM_ERROR("failed to activate iommu\n");
+				goto err_clear;
+			}
+		}
+	}
+
+	return 0;
+
+err_clear:
+	/*
+	 * NOTE(review): this reverse walk covers the WHOLE list, so entries
+	 * past the failing one (never activated) are deactivated too, and
+	 * the prop_idr initialized above is never idr_destroy()ed here —
+	 * consider list_for_each_entry_continue_reverse and idr cleanup.
+	 */
+	/* get ipp driver entry */
+	list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, list)
+		if ((ippdrv->iommu_used) && (drm_priv->vmm))
+			exynos_drm_iommu_deactivate(drm_priv->vmm, ippdrv->dev);
+
+	return ret;
+}
+
+/*
+ * Sub-driver remove callback: tears down every registered ippdrv —
+ * destroys its property idr, deactivates its iommu mapping, drops the
+ * drm device reference and unregisters the driver from the global list.
+ */
+static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct exynos_drm_ippdrv *ippdrv;
+	struct exynos_drm_private *drm_priv = drm_dev->dev_private;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/*
+	 * NOTE(review): exynos_drm_ippdrv_unregister() presumably unlinks
+	 * ippdrv from exynos_drm_ippdrv_list; deleting entries while
+	 * iterating with list_for_each_entry is unsafe — confirm and use
+	 * list_for_each_entry_safe if so.
+	 */
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, list) {
+
+		/* ToDo: need move close ? */
+
+		/* remove,destroy property idr */
+		idr_remove_all(&ippdrv->prop_idr);
+		idr_destroy(&ippdrv->prop_idr);
+
+		if (drm_priv->vmm)
+			exynos_drm_iommu_deactivate(drm_priv->vmm, ippdrv->dev);
+
+		ippdrv->drm_dev = NULL;
+		exynos_drm_ippdrv_unregister(ippdrv);
+	}
+
+	/* ToDo: free notifier callback list if needed */
+}
+
+/*
+ * Per-file open callback: allocates the file's ipp private data and
+ * resets the event/command lists of every idle ipp driver.
+ *
+ * Returns 0 on success or -ENOMEM when the private data allocation fails.
+ */
+static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv;
+	struct exynos_drm_ippdrv *ippdrv;
+	int count = 0;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* ToDo: multi device open */
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		DRM_ERROR("failed to allocate priv.\n");
+		return -ENOMEM;
+	}
+
+	priv->dev = dev;
+	file_priv->ipp_priv = priv;
+	INIT_LIST_HEAD(&priv->event_list);
+
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, list) {
+		DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
+			count++, (int)ippdrv);
+
+		/* check idle state */
+		if (ippdrv->state != IPP_STATE_IDLE)
+			continue;
+
+		/*
+		 * NOTE(review): priv->event_list was just initialized and is
+		 * empty, so this splice is a no-op, while the INIT of
+		 * ippdrv->event_list discards any events still queued there
+		 * — confirm this is intended.
+		 */
+		INIT_LIST_HEAD(&ippdrv->event_list);
+		INIT_LIST_HEAD(&ippdrv->cmd_list);
+		list_splice_init(&priv->event_list, &ippdrv->event_list);
+	}
+
+	return 0;
+}
+
+/*
+ * Per-file close callback: stops the ipp driver this file was using
+ * (stop hook + runtime PM put), marks it idle/non-dedicated, then frees
+ * all pending events and the command list of every idle driver, and
+ * finally releases the file's ipp private data.
+ */
+static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
+		struct drm_file *file)
+{
+	struct drm_exynos_file_private *file_priv = file->driver_priv;
+	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
+	struct exynos_drm_ippdrv *ippdrv_cur = priv->ippdrv;
+	struct exynos_drm_ippdrv *ippdrv;
+	struct drm_exynos_ipp_send_event *e, *te;
+	int count;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* ToDo: for multi device close */
+
+	/* get ipp driver entry */
+	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, list) {
+		DRM_DEBUG_KMS("%s:ippdrv_cur[0x%x]ippdrv[0x%x]\n",
+			__func__, (int)ippdrv_cur, (int)ippdrv);
+
+		DRM_DEBUG_KMS("%s:state[%d]dedicated[%d]\n", __func__,
+			ippdrv->state, ippdrv->dedicated);
+
+		/* current used ippdrv stop needed */
+		if (ippdrv_cur && ippdrv_cur == ippdrv) {
+			if (ippdrv->state == IPP_STATE_START) {
+				if (ippdrv->stop)
+					ippdrv->stop(ippdrv->dev, ippdrv->cmd);
+
+				/* balance the runtime PM get taken at start */
+				if (!pm_runtime_suspended(ippdrv->dev))
+					pm_runtime_put_sync(ippdrv->dev);
+			}
+
+			ippdrv->state = IPP_STATE_IDLE;
+			ippdrv->dedicated = false;
+		}
+
+		/* check idle state */
+		if (ippdrv->state != IPP_STATE_IDLE)
+			continue;
+
+		/* free event */
+		count = 0;
+		list_for_each_entry_safe(e, te,
+			&ippdrv->event_list, base.link) {
+			DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
+				__func__, count++, (int)e);
+
+			/* delete list */
+			list_del(&e->base.link);
+			kfree(e);
+		}
+
+		/* free property list */
+		ipp_free_cmd_list(drm_dev, ippdrv);
+		/* ToDo: How can get current fd property ? */
+	}
+
+	/*
+	 * NOTE(review): file_priv->ipp_priv is left dangling after this
+	 * kfree — consider setting it to NULL; the trailing bare return
+	 * in a void function is also redundant.
+	 */
+	kfree(file_priv->ipp_priv);
+
+	return;
+}
+
+/*
+ * Platform probe: allocates the ipp_context, initializes its lock,
+ * work items and driver idr, assigns an ipp_id to every registered
+ * ippdrv, and registers the exynos drm sub-driver.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or the error
+ * from id creation / sub-driver registration.
+ */
+static int __devinit ipp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct ipp_context *ctx;
+	struct exynos_drm_subdrv *subdrv;
+	struct exynos_drm_ippdrv *tippdrv;
+	int ret = -EINVAL;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* init ioctl lock */
+	mutex_init(&ctx->lock);
+	/* init event, cmd work thread */
+	INIT_WORK(&ctx->sched_event, ipp_sched_event);
+	INIT_WORK(&ctx->sched_cmd, ipp_sched_cmd);
+	/* init ipp driver idr */
+	idr_init(&ctx->ipp_idr);
+
+	/* get ipp driver entry */
+	list_for_each_entry(tippdrv, &exynos_drm_ippdrv_list, list) {
+		/* create ipp id */
+		ret = ipp_create_id(&ctx->ipp_idr, tippdrv, &tippdrv->ipp_id);
+		if (ret) {
+			DRM_ERROR("failed to create id.\n");
+			goto err_clear;
+		}
+
+		DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, tippdrv->ipp_id);
+
+		/* id 0 is reserved/invalid; log it but keep going */
+		if (tippdrv->ipp_id == 0)
+			DRM_ERROR("failed to get ipp_id[%d]\n",
+				tippdrv->ipp_id);
+	}
+
+	/* set sub driver information */
+	subdrv = &ctx->subdrv;
+	subdrv->dev = dev;
+	subdrv->probe = ipp_subdrv_probe;
+	subdrv->remove = ipp_subdrv_remove;
+	subdrv->open = ipp_subdrv_open;
+	subdrv->close = ipp_subdrv_close;
+
+	/* set driver data */
+	platform_set_drvdata(pdev, ctx);
+
+	/* register sub driver */
+	ret = exynos_drm_subdrv_register(subdrv);
+	if (ret < 0) {
+		DRM_ERROR("failed to register drm ipp device.\n");
+		goto err_clear;
+	}
+
+	dev_info(&pdev->dev, "drm ipp registered successfully.\n");
+
+	return 0;
+
+err_clear:
+	/*
+	 * NOTE(review): ctx->ipp_idr was idr_init()ed (and may hold ids)
+	 * but is not idr_remove_all()/idr_destroy()ed here as it is in
+	 * ipp_remove() — confirm and clean up before freeing ctx.
+	 */
+	kfree(ctx);
+
+	return ret;
+}
+
+/*
+ * Platform remove: unregisters the sub-driver, tears down the ipp idr
+ * (mirroring the idr_init in ipp_probe) and frees the context.
+ */
+static int __devexit ipp_remove(struct platform_device *pdev)
+{
+	struct ipp_context *ctx = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_KMS("%s\n", __func__);
+
+	/* unregister sub driver */
+	exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	/* remove,destroy ipp idr */
+	idr_remove_all(&ctx->ipp_idr);
+	idr_destroy(&ctx->ipp_idr);
+
+	kfree(ctx);
+
+	return 0;
+}
+
+
+/* platform driver glue binding the IPP core to the "exynos-drm-ipp" device */
+struct platform_driver ipp_driver = {
+	.probe		= ipp_probe,
+	.remove		= __devexit_p(ipp_remove),
+	.driver		= {
+		.name	= "exynos-drm-ipp",
+		.owner	= THIS_MODULE,
+	},
+};
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 0000000..458f747
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ * Eunchul Kim <chulspro.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IPP_H_
+#define _EXYNOS_DRM_IPP_H_
+
+#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
+#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
+#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
+
+/* definition of state: lifecycle of an ipp driver instance */
+enum drm_exynos_ipp_state {
+	IPP_STATE_IDLE,		/* not in use; open/close treat this as free */
+	IPP_STATE_START,	/* a command is currently running */
+	IPP_STATE_STOP,		/* stopped; presumably awaiting restart — confirm */
+};
+
+/*
+ * A structure of buffer information.
+ *
+ * @gem_objs: Y, Cb, Cr each gem object.
+ * @base: Y, Cb, Cr each planar address.
+ * @size: Y, Cb, Cr each planar size.
+ */
+struct drm_exynos_ipp_buf_info {
+ void *gem_objs[EXYNOS_DRM_PLANAR_MAX];
+ dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
+ uint64_t size[EXYNOS_DRM_PLANAR_MAX];
+};
+
+/*
+ * A structure of source,destination operations.
+ *
+ * @set_fmt: set format of image.
+ * @set_transf: set transform(rotations, flip).
+ * @set_size: set size of region.
+ * @set_addr: set address for dma.
+ */
+struct exynos_drm_ipp_ops {
+ int (*set_fmt)(struct device *dev, u32 fmt);
+ int (*set_transf)(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip);
+ int (*set_size)(struct device *dev, int swap,
+ struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
+ int (*set_addr)(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
+ enum drm_exynos_ipp_buf_ctrl buf_ctrl);
+};
+
+/*
+ * A structure of ipp driver.
+ *
+ * @list: list head.
+ * @dev: platform device.
+ * @drm_dev: drm device.
+ * @state: state of ipp drivers.
+ * @ipp_id: id of ipp driver.
+ * @dedicated: dedicated ipp device.
+ * @iommu_used: iommu used status.
+ * @cmd: used command.
+ * @ops: source, destination operations.
+ * @property: current property.
+ * @prop_idr: property idr.
+ * @cmd_list: list head to command information.
+ * @event_list: list head to event information.
+ * @reset: reset ipp block.
+ * @check_property: check property about format, size, buffer.
+ * @start: ipp each device start.
+ * @stop: ipp each device stop.
+ */
+struct exynos_drm_ippdrv {
+ struct list_head list;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ enum drm_exynos_ipp_state state;
+ u32 ipp_id;
+ bool dedicated;
+ bool iommu_used;
+ enum drm_exynos_ipp_cmd cmd;
+ struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
+ struct drm_exynos_ipp_property *property;
+ struct idr prop_idr;
+ struct list_head cmd_list;
+ struct list_head event_list;
+
+ int (*check_property)(struct device *dev,
+ struct drm_exynos_ipp_property *property);
+ int (*reset)(struct device *dev);
+ int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+ void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
+};
+
+#ifdef CONFIG_DRM_EXYNOS_IPP
+extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
+extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_buf(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ipp_ctrl(struct drm_device *drm_dev, void *data,
+ struct drm_file *file);
+extern int exynos_drm_ippnb_register(struct notifier_block *nb);
+extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
+extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
+#else
+static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file_priv)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_buf(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ipp_ctrl(struct drm_device *drm_dev,
+ void *data,
+ struct drm_file *file)
+{
+ return -ENOTTY;
+}
+
+static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
+{
+ return -ENODEV;
+}
+
+static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
+{
+ return -ENOTTY;
+}
+#endif
+
+/* ToDo: Must be change to queue_work */
+void ipp_send_event_handler(struct exynos_drm_ippdrv *ippdrv,
+ int buf_idx);
+
+#endif /* _EXYNOS_DRM_IPP_H_ */
+
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c4c6525..b89829e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -12,9 +12,12 @@
#include "drmP.h"
#include "exynos_drm.h"
-#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+
+#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
struct exynos_plane {
struct drm_plane base;
@@ -30,6 +33,108 @@ static const uint32_t formats[] = {
DRM_FORMAT_NV12MT,
};
+/*
+ * exynos_plane_mode_set - translate drm plane state into the exynos overlay
+ *
+ * Copies the dma address and kernel virtual address of every fb buffer
+ * into the overlay, clips the requested crtc rectangle against the
+ * current mode, records source/destination geometry and mode data, then
+ * pushes the overlay to hardware via exynos_drm_encoder_plane_mode_set.
+ *
+ * src_x/src_y/src_w/src_h are plain pixel values; the 16.16 fixed-point
+ * shift is done by the caller (see exynos_update_plane).
+ *
+ * Returns 0 on success or -EFAULT when a framebuffer buffer is missing.
+ */
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+			  struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+			  unsigned int crtc_w, unsigned int crtc_h,
+			  uint32_t src_x, uint32_t src_y,
+			  uint32_t src_w, uint32_t src_h)
+{
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+	unsigned int actual_w;
+	unsigned int actual_h;
+	int nr;
+	int i;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	/* one buffer per plane of the pixel format (e.g. 2 for NV12M) */
+	nr = exynos_drm_format_num_buffers(fb->pixel_format);
+	for (i = 0; i < nr; i++) {
+		struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
+
+		if (!buffer) {
+			DRM_LOG_KMS("buffer is null\n");
+			return -EFAULT;
+		}
+
+		overlay->dma_addr[i] = buffer->dma_addr;
+		overlay->vaddr[i] = buffer->kvaddr;
+
+		DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+				i, (unsigned long)overlay->vaddr[i],
+				(unsigned long)overlay->dma_addr[i]);
+	}
+
+	/* clip the destination rectangle to the active display area */
+	actual_w = min((unsigned)(crtc->mode.hdisplay - crtc_x), crtc_w);
+	actual_h = min((unsigned)(crtc->mode.vdisplay - crtc_y), crtc_h);
+
+	/* set drm framebuffer data. */
+	overlay->fb_x = src_x;
+	overlay->fb_y = src_y;
+	overlay->fb_width = fb->width;
+	overlay->fb_height = fb->height;
+	overlay->src_width = src_w;
+	overlay->src_height = src_h;
+	overlay->bpp = fb->bits_per_pixel;
+	overlay->pitch = fb->pitches[0];
+	overlay->pixel_format = fb->pixel_format;
+
+	/* set overlay range to be displayed. */
+	overlay->crtc_x = crtc_x;
+	overlay->crtc_y = crtc_y;
+	overlay->crtc_width = actual_w;
+	overlay->crtc_height = actual_h;
+
+	/* set drm mode data. */
+	overlay->mode_width = crtc->mode.hdisplay;
+	overlay->mode_height = crtc->mode.vdisplay;
+	overlay->refresh = crtc->mode.vrefresh;
+	overlay->scan_flag = crtc->mode.flags;
+
+	DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
+			overlay->crtc_x, overlay->crtc_y,
+			overlay->crtc_width, overlay->crtc_height);
+
+	/* hand the filled overlay to the encoder/controller layer */
+	exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set);
+
+	return 0;
+}
+
+/*
+ * exynos_plane_commit - apply the previously-set overlay to hardware,
+ * identified by its zpos, via the encoder commit callback.
+ */
+void exynos_plane_commit(struct drm_plane *plane)
+{
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+	exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+			exynos_drm_encoder_plane_commit);
+}
+
+/*
+ * exynos_plane_dpms - enable or disable the overlay behind this plane.
+ *
+ * DRM_MODE_DPMS_ON enables it, any other mode disables it; the
+ * 'enabled' flag makes repeated calls in the same state a no-op.
+ */
+void exynos_plane_dpms(struct drm_plane *plane, int mode)
+{
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (mode == DRM_MODE_DPMS_ON) {
+		if (exynos_plane->enabled)
+			return;
+
+		exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+				exynos_drm_encoder_plane_enable);
+
+		exynos_plane->enabled = true;
+	} else {
+		if (!exynos_plane->enabled)
+			return;
+
+		exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+				exynos_drm_encoder_plane_disable);
+
+		exynos_plane->enabled = false;
+	}
+}
+
static int
exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb, int crtc_x, int crtc_y,
@@ -37,64 +142,37 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct exynos_plane *exynos_plane =
- container_of(plane, struct exynos_plane, base);
- struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
- struct exynos_drm_crtc_pos pos;
int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
- pos.crtc_x = crtc_x;
- pos.crtc_y = crtc_y;
- pos.crtc_w = crtc_w;
- pos.crtc_h = crtc_h;
-
- /* considering 16.16 fixed point of source values */
- pos.fb_x = src_x >> 16;
- pos.fb_y = src_y >> 16;
- pos.src_w = src_w >> 16;
- pos.src_h = src_h >> 16;
-
- ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
+ ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
+ crtc_w, crtc_h, src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
if (ret < 0)
return ret;
- exynos_drm_fn_encoder(crtc, overlay,
- exynos_drm_encoder_crtc_mode_set);
- exynos_drm_fn_encoder(crtc, &overlay->zpos,
- exynos_drm_encoder_crtc_plane_commit);
+ plane->crtc = crtc;
+ plane->fb = crtc->fb;
- exynos_plane->enabled = true;
+ exynos_plane_commit(plane);
+ exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
return 0;
}
static int exynos_disable_plane(struct drm_plane *plane)
{
- struct exynos_plane *exynos_plane =
- container_of(plane, struct exynos_plane, base);
- struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
-
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (!exynos_plane->enabled)
- return 0;
-
- exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
- exynos_drm_encoder_crtc_disable);
-
- exynos_plane->enabled = false;
- exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF);
return 0;
}
static void exynos_plane_destroy(struct drm_plane *plane)
{
- struct exynos_plane *exynos_plane =
- container_of(plane, struct exynos_plane, base);
+ struct exynos_plane *exynos_plane = to_exynos_plane(plane);
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -103,69 +181,79 @@ static void exynos_plane_destroy(struct drm_plane *plane)
kfree(exynos_plane);
}
+/*
+ * Plane property setter: only the driver-private "zpos" property is
+ * supported; its value is stored in the overlay. Any other property
+ * yields -EINVAL.
+ */
+static int exynos_plane_set_property(struct drm_plane *plane,
+				     struct drm_property *property,
+				     uint64_t val)
+{
+	struct drm_device *dev = plane->dev;
+	struct exynos_plane *exynos_plane = to_exynos_plane(plane);
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+	if (property == dev_priv->plane_zpos_property) {
+		exynos_plane->overlay.zpos = val;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
static struct drm_plane_funcs exynos_plane_funcs = {
.update_plane = exynos_update_plane,
.disable_plane = exynos_disable_plane,
.destroy = exynos_plane_destroy,
+ .set_property = exynos_plane_set_property,
};
-int exynos_plane_init(struct drm_device *dev, unsigned int nr)
+static void exynos_plane_attach_zpos_property(struct drm_plane *plane)
{
- struct exynos_plane *exynos_plane;
- uint32_t possible_crtcs;
+ struct drm_device *dev = plane->dev;
+ struct exynos_drm_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
- exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
- if (!exynos_plane)
- return -ENOMEM;
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- /* all CRTCs are available */
- possible_crtcs = (1 << MAX_CRTC) - 1;
+ prop = dev_priv->plane_zpos_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 0,
+ MAX_PLANE - 1);
+ if (!prop)
+ return;
- exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ dev_priv->plane_zpos_property = prop;
+ }
- return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
- &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
- false);
+ drm_object_attach_property(&plane->base, prop, 0);
}
-int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+ unsigned int possible_crtcs, bool priv)
{
- struct drm_exynos_plane_set_zpos *zpos_req = data;
- struct drm_mode_object *obj;
- struct drm_plane *plane;
struct exynos_plane *exynos_plane;
- int ret = 0;
+ int err;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
- if (zpos_req->zpos != DEFAULT_ZPOS) {
- DRM_ERROR("zpos not within limits\n");
- return -EINVAL;
- }
+ exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+ if (!exynos_plane) {
+ DRM_ERROR("failed to allocate plane\n");
+ return NULL;
}
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, zpos_req->plane_id,
- DRM_MODE_OBJECT_PLANE);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown plane ID %d\n",
- zpos_req->plane_id);
- ret = -EINVAL;
- goto out;
+ err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+ &exynos_plane_funcs, formats, ARRAY_SIZE(formats),
+ priv);
+ if (err) {
+ DRM_ERROR("failed to initialize plane\n");
+ kfree(exynos_plane);
+ return NULL;
}
- plane = obj_to_plane(obj);
- exynos_plane = container_of(plane, struct exynos_plane, base);
-
- exynos_plane->overlay.zpos = zpos_req->zpos;
+ if (priv)
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+ else
+ exynos_plane_attach_zpos_property(&exynos_plane->base);
-out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
+ return &exynos_plane->base;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index 16b71f8..8831245 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,6 +9,12 @@
*
*/
-int exynos_plane_init(struct drm_device *dev, unsigned int nr);
-int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+void exynos_plane_commit(struct drm_plane *plane);
+void exynos_plane_dpms(struct drm_plane *plane, int mode);
+struct drm_plane *exynos_plane_init(struct drm_device *dev,
+ unsigned int possible_crtcs, bool priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 5bf1d6e..9e40790 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -15,13 +15,12 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_qos_params.h>
#include "drmP.h"
#include "exynos_drm.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
+#include "exynos_drm_ipp.h"
/* Configuration */
#define ROT_CONFIG 0x00
@@ -49,39 +48,31 @@
#define ROT_STATUS_IRQ_VAL_COMPLETE 1
#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
-/* Sourc Buffer Address */
+/* Buffer Address */
#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
+#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
-/* Source Buffer Size */
+/* Buffer Size */
#define ROT_SRC_BUF_SIZE 0x3c
-#define ROT_SRC_BUF_SIZE_H(x) ((x) << 16)
-#define ROT_SRC_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_DST_BUF_SIZE 0x5c
+#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
+#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
+#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
+#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
-/* Source Crop Position */
+/* Crop Position */
#define ROT_SRC_CROP_POS 0x40
-#define ROT_SRC_CROP_POS_Y(x) ((x) << 16)
-#define ROT_SRC_CROP_POS_X(x) ((x) << 0)
+#define ROT_DST_CROP_POS 0x60
+#define ROT_CROP_POS_Y(x) ((x) << 16)
+#define ROT_CROP_POS_X(x) ((x) << 0)
/* Source Crop Size */
#define ROT_SRC_CROP_SIZE 0x44
#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
-/* Destination Buffer Address */
-#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
-
-/* Destination Buffer Size */
-#define ROT_DST_BUF_SIZE 0x5c
-#define ROT_DST_BUF_SIZE_H(x) ((x) << 16)
-#define ROT_DST_BUF_SIZE_W(x) ((x) << 0)
-
-/* Destination Crop Position */
-#define ROT_DST_CROP_POS 0x60
-#define ROT_DST_CROP_POS_Y(x) ((x) << 16)
-#define ROT_DST_CROP_POS_X(x) ((x) << 0)
-
/* Round to nearest aligned value */
-#define ROT_ALIGN(x, align, mask) ((*(x) + (1 << ((align) - 1))) & (mask))
+#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
/* Minimum limit value */
#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
/* Maximum limit value */
@@ -111,28 +102,11 @@ struct rot_context {
struct resource *regs_res;
void __iomem *regs;
int irq;
- int exec_ret;
- struct exynos_drm_subdrv subdrv;
- struct completion complete;
- struct mutex exec_mutex;
- spinlock_t irq_lock;
- struct pm_qos_request_list pm_qos;
+ struct exynos_drm_ippdrv ippdrv;
+ int cur_buf_id[EXYNOS_DRM_OPS_MAX];
bool suspended;
};
-struct rot_buffer {
- dma_addr_t src_addr[DRM_EXYNOS_ROT_MAX_BUF];
- dma_addr_t dst_addr[DRM_EXYNOS_ROT_MAX_BUF];
- void *src_gem_obj[DRM_EXYNOS_ROT_MAX_BUF];
- void *dst_gem_obj[DRM_EXYNOS_ROT_MAX_BUF];
- u32 src_cnt;
- u32 dst_cnt;
- u32 src_w;
- u32 src_h;
- u32 dst_w;
- u32 dst_h;
-};
-
static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
{
u32 value = readl(rot->regs + ROT_CONFIG);
@@ -145,6 +119,14 @@ static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
writel(value, rot->regs + ROT_CONFIG);
}
+/* Read back the pixel format bits (ROT_CONTROL_FMT_MASK) from ROT_CONTROL. */
+static u32 rotator_reg_get_format(struct rot_context *rot)
+{
+	u32 value = readl(rot->regs + ROT_CONTROL);
+	value &= ROT_CONTROL_FMT_MASK;
+
+	return value;
+}
+
static void rotator_reg_set_format(struct rot_context *rot, u32 img_fmt)
{
u32 value = readl(rot->regs + ROT_CONTROL);
@@ -155,7 +137,7 @@ static void rotator_reg_set_format(struct rot_context *rot, u32 img_fmt)
case DRM_FORMAT_NV12M:
value |= ROT_CONTROL_FMT_YCBCR420_2P;
break;
- case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
value |= ROT_CONTROL_FMT_RGB888;
break;
default:
@@ -167,16 +149,16 @@ static void rotator_reg_set_format(struct rot_context *rot, u32 img_fmt)
}
static void rotator_reg_set_flip(struct rot_context *rot,
- enum drm_exynos_rot_flip flip)
+ enum drm_exynos_flip flip)
{
u32 value = readl(rot->regs + ROT_CONTROL);
value &= ~ROT_CONTROL_FLIP_MASK;
switch (flip) {
- case ROT_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_VERTICAL:
value |= ROT_CONTROL_FLIP_VERTICAL;
break;
- case ROT_FLIP_HORIZONTAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
value |= ROT_CONTROL_FLIP_HORIZONTAL;
break;
default:
@@ -188,19 +170,19 @@ static void rotator_reg_set_flip(struct rot_context *rot,
}
static void rotator_reg_set_rotation(struct rot_context *rot,
- enum drm_exynos_rot_degree degree)
+ enum drm_exynos_degree degree)
{
u32 value = readl(rot->regs + ROT_CONTROL);
value &= ~ROT_CONTROL_ROT_MASK;
switch (degree) {
- case ROT_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_90:
value |= ROT_CONTROL_ROT_90;
break;
- case ROT_DEGREE_180:
+ case EXYNOS_DRM_DEGREE_180:
value |= ROT_CONTROL_ROT_180;
break;
- case ROT_DEGREE_270:
+ case EXYNOS_DRM_DEGREE_270:
value |= ROT_CONTROL_ROT_270;
break;
default:
@@ -247,16 +229,25 @@ static void rotator_reg_set_src_buf_addr(struct rot_context *rot,
writel(addr, rot->regs + ROT_SRC_BUF_ADDR(i));
}
+/*
+ * Read back the source buffer size: width in the low half and height in
+ * the high half of ROT_SRC_BUF_SIZE (see ROT_GET_BUF_SIZE_W/H).
+ */
+static void rotator_reg_get_src_buf_size(struct rot_context *rot, u32 *w,
+						u32 *h)
+{
+	u32 value = readl(rot->regs + ROT_SRC_BUF_SIZE);
+
+	*w = ROT_GET_BUF_SIZE_W(value);
+	*h = ROT_GET_BUF_SIZE_H(value);
+}
+
static void rotator_reg_set_src_buf_size(struct rot_context *rot, u32 w, u32 h)
{
- u32 value = ROT_SRC_BUF_SIZE_H(h) | ROT_SRC_BUF_SIZE_W(w);
+ u32 value = ROT_SET_BUF_SIZE_H(h) | ROT_SET_BUF_SIZE_W(w);
writel(value, rot->regs + ROT_SRC_BUF_SIZE);
}
static void rotator_reg_set_src_crop_pos(struct rot_context *rot, u32 x, u32 y)
{
- u32 value = ROT_SRC_CROP_POS_Y(y) | ROT_SRC_CROP_POS_X(x);
+ u32 value = ROT_CROP_POS_Y(y) | ROT_CROP_POS_X(x);
writel(value, rot->regs + ROT_SRC_CROP_POS);
}
@@ -274,16 +265,25 @@ static void rotator_reg_set_dst_buf_addr(struct rot_context *rot,
writel(addr, rot->regs + ROT_DST_BUF_ADDR(i));
}
+/*
+ * Read back the destination buffer size: width in the low half and
+ * height in the high half of ROT_DST_BUF_SIZE.
+ */
+static void rotator_reg_get_dst_buf_size(struct rot_context *rot, u32 *w,
+						u32 *h)
+{
+	u32 value = readl(rot->regs + ROT_DST_BUF_SIZE);
+
+	*w = ROT_GET_BUF_SIZE_W(value);
+	*h = ROT_GET_BUF_SIZE_H(value);
+}
+
static void rotator_reg_set_dst_buf_size(struct rot_context *rot, u32 w, u32 h)
{
- u32 value = ROT_DST_BUF_SIZE_H(h) | ROT_DST_BUF_SIZE_W(w);
+ u32 value = ROT_SET_BUF_SIZE_H(h) | ROT_SET_BUF_SIZE_W(w);
writel(value, rot->regs + ROT_DST_BUF_SIZE);
}
static void rotator_reg_set_dst_crop_pos(struct rot_context *rot, u32 x, u32 y)
{
- u32 value = ROT_DST_CROP_POS_Y(y) | ROT_DST_CROP_POS_X(x);
+ u32 value = ROT_CROP_POS_Y(y) | ROT_CROP_POS_X(x);
writel(value, rot->regs + ROT_DST_CROP_POS);
}
@@ -294,71 +294,40 @@ static void rotator_reg_get_dump(struct rot_context *rot)
for (i = 0; i <= ROT_DST_CROP_POS; i += 0x4) {
value = readl(rot->regs + i);
- DRM_INFO("+0x%x: 0x%x", i, value);
+ DRM_INFO("[%s] [0x%x] : 0x%x\n", __func__, i, value);
}
}
-static bool rotator_check_format_n_handle_valid(u32 img_fmt,
- u32 src_buf_handle_cnt,
- u32 dst_buf_handle_cnt)
+static irqreturn_t rotator_irq_handler(int irq, void *arg)
{
- bool ret = false;
+ struct rot_context *rot = arg;
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ enum rot_irq_status irq_status;
- if ((src_buf_handle_cnt != dst_buf_handle_cnt)
- || (src_buf_handle_cnt == 0))
- return ret;
+ /* Get execution result */
+ irq_status = rotator_reg_get_irq_status(rot);
+ rotator_reg_set_irq_status_clear(rot, irq_status);
- switch (img_fmt) {
- case DRM_FORMAT_NV12M:
- if (src_buf_handle_cnt == 2)
- ret = true;
- break;
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_RGB888:
- if (src_buf_handle_cnt == 1)
- ret = true;
- break;
- default:
- DRM_ERROR("invalid image format\n");
- break;
+ if (irq_status == ROT_IRQ_STATUS_COMPLETE)
+ ipp_send_event_handler(ippdrv,
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST]);
+ else {
+ DRM_ERROR("the SFR is set illegally\n");
+ rotator_reg_get_dump(rot);
}
- return ret;
-}
-
-static void rotator_align_size(struct rot_limit *limit, u32 mask, u32 *w,
- u32 *h)
-{
- u32 value;
-
- value = ROT_ALIGN(w, limit->align, mask);
- if (value < limit->min_w)
- *w = ROT_MIN(limit->min_w, mask);
- else if (value > limit->max_w)
- *w = ROT_MAX(limit->max_w, mask);
- else
- *w = value;
-
- value = ROT_ALIGN(h, limit->align, mask);
- if (value < limit->min_h)
- *h = ROT_MIN(limit->min_h, mask);
- else if (value > limit->max_h)
- *h = ROT_MAX(limit->max_h, mask);
- else
- *h = value;
+ return IRQ_HANDLED;
}
-static void rotator_align_buffer(struct rot_context *rot,
- struct rot_buffer *buf,
- struct drm_exynos_rot_buffer *req_buf,
- struct drm_exynos_rot_control *control)
+static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
+ u32 *vsize)
{
struct rot_limit_table *limit_tbl = rot->limit_tbl;
struct rot_limit *limit;
- u32 mask;
+ u32 mask, value;
/* Get size limit */
- if (control->img_fmt == DRM_FORMAT_RGB888)
+ if (fmt == ROT_CONTROL_FMT_RGB888)
limit = &limit_tbl->rgb888;
else
limit = &limit_tbl->ycbcr420_2p;
@@ -366,310 +335,333 @@ static void rotator_align_buffer(struct rot_context *rot,
/* Get mask for rounding to nearest aligned value */
mask = ~((1 << limit->align) - 1);
- /* For source buffer */
- buf->src_w = req_buf->src_w;
- buf->src_h = req_buf->src_h;
- rotator_align_size(limit, mask, &buf->src_w, &buf->src_h);
+ /* Set aligned width */
+ value = ROT_ALIGN(*hsize, limit->align, mask);
+ if (value < limit->min_w)
+ *hsize = ROT_MIN(limit->min_w, mask);
+ else if (value > limit->max_w)
+ *hsize = ROT_MAX(limit->max_w, mask);
+ else
+ *hsize = value;
- /* For destination buffer */
- buf->dst_w = req_buf->dst_w;
- buf->dst_h = req_buf->dst_h;
- rotator_align_size(limit, mask, &buf->dst_w, &buf->dst_h);
+ /* Set aligned height */
+ value = ROT_ALIGN(*vsize, limit->align, mask);
+ if (value < limit->min_h)
+ *vsize = ROT_MIN(limit->min_h, mask);
+ else if (value > limit->max_h)
+ *vsize = ROT_MAX(limit->max_h, mask);
+ else
+ *vsize = value;
}
-static bool rotator_check_crop_boundary(struct rot_buffer *buf,
- struct drm_exynos_rot_control *control,
- struct drm_exynos_rot_crop *crop)
+static int rotator_src_set_fmt(struct device *dev, u32 fmt)
{
- bool ret = true;
+ struct rot_context *rot = dev_get_drvdata(dev);
- /* Check source crop position */
- if ((crop->src_x + crop->src_w > buf->src_w)
- || (crop->src_y + crop->src_h > buf->src_h))
- return false;
+ /* Set format configuration */
+ rotator_reg_set_format(rot, fmt);
- /* Check destination crop position */
- switch (control->degree) {
- case ROT_DEGREE_90:
- case ROT_DEGREE_270:
- if ((crop->dst_x + crop->src_h > buf->dst_w)
- || (crop->dst_y + crop->src_w > buf->dst_h))
- ret = false;
- break;
- default:
- if ((crop->dst_x + crop->src_w > buf->dst_w)
- || (crop->dst_y + crop->src_h > buf->dst_h))
- ret = false;
- break;
- }
-
- return ret;
+ return 0;
}
-static int rotator_iommu_map(struct rot_buffer *buf,
- struct drm_exynos_rot_buffer *req_buf,
- struct iommu_gem_map_params *params,
- struct list_head *iommu_list)
-{
- /* For source buffer */
- buf->src_cnt = 0;
- while (buf->src_cnt < req_buf->src_cnt) {
- buf->src_addr[buf->src_cnt] = exynos_drm_iommu_map_gem(params,
- iommu_list,
- req_buf->src_handle[buf->src_cnt],
- IOMMU_ROTATOR);
- if (!buf->src_addr[buf->src_cnt]) {
- DRM_ERROR("failed to map src handle[%u]\n",
- buf->src_cnt);
- return -EINVAL;
- }
- buf->src_gem_obj[(buf->src_cnt)++] = params->gem_obj;
- }
+static int rotator_src_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
- /* For destination buffer */
- buf->dst_cnt = 0;
- while (buf->dst_cnt < req_buf->dst_cnt) {
- buf->dst_addr[buf->dst_cnt] = exynos_drm_iommu_map_gem(params,
- iommu_list,
- req_buf->dst_handle[buf->dst_cnt],
- IOMMU_ROTATOR);
- if (!buf->dst_addr[buf->dst_cnt]) {
- DRM_ERROR("failed to map dst handle[%u]\n",
- buf->dst_cnt);
- return -EINVAL;
- }
- buf->dst_gem_obj[(buf->dst_cnt)++] = params->gem_obj;
- }
+ /* Get format */
+ fmt = rotator_reg_get_format(rot);
- return 0;
-}
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
-static void rotator_iommu_unmap(struct rot_buffer *buf,
- struct iommu_gem_map_params *params)
-{
- /* For destination buffer */
- while (buf->dst_cnt > 0) {
- params->gem_obj = buf->dst_gem_obj[--(buf->dst_cnt)];
- exynos_drm_iommu_unmap_gem(params,
- buf->dst_addr[buf->dst_cnt],
- IOMMU_ROTATOR);
- }
+ /* Set buffer size configuration */
+ rotator_reg_set_src_buf_size(rot, hsize, vsize);
- /* For source buffer */
- while (buf->src_cnt > 0) {
- params->gem_obj = buf->src_gem_obj[--(buf->src_cnt)];
- exynos_drm_iommu_unmap_gem(params,
- buf->src_addr[buf->src_cnt],
- IOMMU_ROTATOR);
- }
+ /* Set crop image position configuration */
+ rotator_reg_set_src_crop_pos(rot, pos->x, pos->y);
+ rotator_reg_set_src_crop_size(rot, pos->w, pos->h);
+
+ return 0;
}
-static void rotator_execute(struct rot_context *rot,
- struct rot_buffer *buf,
- struct drm_exynos_rot_control *control,
- struct drm_exynos_rot_crop *crop)
+static int rotator_src_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_ctrl ctrl)
{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 fmt, hsize, vsize;
int i;
- pm_runtime_get_sync(rot->subdrv.dev);
-
- /* Set interrupt enable */
- rotator_reg_set_irq(rot, true);
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
- /* Set control registers */
- rotator_reg_set_format(rot, control->img_fmt);
- rotator_reg_set_flip(rot, control->flip);
- rotator_reg_set_rotation(rot, control->degree);
+ switch (ctrl) {
+ case IPP_BUF_CTRL_QUEUE:
+ /* Set address configuration */
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++)
+ addr[i] = buf_info->base[i];
- /* Set source buffer address */
- for (i = 0; i < DRM_EXYNOS_ROT_MAX_BUF; i++)
- rotator_reg_set_src_buf_addr(rot, buf->src_addr[i], i);
+ /* Get format */
+ fmt = rotator_reg_get_format(rot);
- /* Set source buffer size */
- rotator_reg_set_src_buf_size(rot, buf->src_w, buf->src_h);
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ (addr[EXYNOS_DRM_PLANAR_CB] == 0x00)) {
+ /* Get buf size */
+ rotator_reg_get_src_buf_size(rot, &hsize, &vsize);
- /* Set destination buffer address */
- for (i = 0; i < DRM_EXYNOS_ROT_MAX_BUF; i++)
- rotator_reg_set_dst_buf_addr(rot, buf->dst_addr[i], i);
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
- /* Set destination buffer size */
- rotator_reg_set_dst_buf_size(rot, buf->dst_w, buf->dst_h);
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++)
+ rotator_reg_set_src_buf_addr(rot, addr[i], i);
+ break;
+ case IPP_BUF_CTRL_DEQUEUE:
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++)
+ rotator_reg_set_src_buf_addr(rot, buf_info->base[i], i);
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
- /* Set source crop image position */
- rotator_reg_set_src_crop_pos(rot, crop->src_x, crop->src_y);
+ return 0;
+}
- /* Set source crop image size */
- rotator_reg_set_src_crop_size(rot, crop->src_w, crop->src_h);
+static int rotator_dst_set_transf(struct device *dev,
+ enum drm_exynos_degree degree,
+ enum drm_exynos_flip flip)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
- /* Set destination crop image position */
- rotator_reg_set_dst_crop_pos(rot, crop->dst_x, crop->dst_y);
+ /* Set transform configuration */
+ rotator_reg_set_flip(rot, flip);
+ rotator_reg_set_rotation(rot, degree);
- /* Start rotator operation */
- rotator_reg_set_start(rot);
+ /* Check degree for setting buffer size swap */
+ if ((degree == EXYNOS_DRM_DEGREE_90) ||
+ (degree == EXYNOS_DRM_DEGREE_270))
+ return 1;
+ else
+ return 0;
}
-int exynos_drm_rotator_exec_ioctl(struct drm_device *drm_dev, void *data,
- struct drm_file *file)
+static int rotator_dst_set_size(struct device *dev, int swap,
+ struct drm_exynos_pos *pos,
+ struct drm_exynos_sz *sz)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_rot_private *priv = file_priv->rot_priv;
- struct device *dev = priv->dev;
- struct rot_context *rot;
- struct drm_exynos_rot_exec_data *req = data;
- struct drm_exynos_rot_buffer *req_buf = &req->buf;
- struct drm_exynos_rot_control *control = &req->control;
- struct drm_exynos_rot_crop *crop = &req->crop;
- struct rot_buffer buf;
- struct iommu_gem_map_params params;
-
- if (!dev) {
- DRM_ERROR("failed to get dev\n");
- return -ENODEV;
- }
+ struct rot_context *rot = dev_get_drvdata(dev);
+ u32 fmt, hsize, vsize;
- rot = dev_get_drvdata(dev);
- if (!rot) {
- DRM_ERROR("failed to get drvdata\n");
- return -EFAULT;
- }
+ /* Get format */
+ fmt = rotator_reg_get_format(rot);
- if (rot->suspended) {
- DRM_ERROR("suspended state\n");
- return -EPERM;
- }
-
- if (!rotator_check_format_n_handle_valid(control->img_fmt,
- req_buf->src_cnt,
- req_buf->dst_cnt)) {
- DRM_ERROR("format or handles are invalid\n");
- return -EINVAL;
- }
+ /* Align buffer size */
+ hsize = sz->hsize;
+ vsize = sz->vsize;
+ rotator_align_size(rot, fmt, &hsize, &vsize);
- init_completion(&rot->complete);
+ /* Set buffer size configuration */
+ rotator_reg_set_dst_buf_size(rot, hsize, vsize);
- /* Align buffer */
- rotator_align_buffer(rot, &buf, req_buf, control);
+ /* Set crop image position configuration */
+ rotator_reg_set_dst_crop_pos(rot, pos->x, pos->y);
- /* Check crop boundary */
- if (!rotator_check_crop_boundary(&buf, control, crop)) {
- DRM_ERROR("boundary errror\n");
- return -EINVAL;
- }
+ return 0;
+}
- params.dev = dev;
- params.drm_dev = drm_dev;
- params.file = file;
+static int rotator_dst_set_addr(struct device *dev,
+ struct drm_exynos_ipp_buf_info *buf_info,
+ u32 buf_id, enum drm_exynos_ipp_buf_ctrl ctrl)
+{
+ struct rot_context *rot = dev_get_drvdata(dev);
+ dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
+ u32 fmt, hsize, vsize;
+ int i;
- /* Map IOMMU */
- rot->exec_ret = rotator_iommu_map(&buf, req_buf, &params,
- &priv->iommu_list);
- if (rot->exec_ret < 0)
- goto err_iommu_map;
+ /* Set current buf_id */
+ rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
- /* Assign another src/dst_addr for NV12 image format */
- if (control->img_fmt == DRM_FORMAT_NV12) {
- u32 size = crop->src_w * crop->src_h;
+ switch (ctrl) {
+ case IPP_BUF_CTRL_QUEUE:
+ /* Set address configuration */
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++)
+ addr[i] = buf_info->base[i];
- buf.src_addr[buf.src_cnt + 1] =
- buf.src_addr[buf.src_cnt] + size;
- buf.dst_addr[buf.dst_cnt + 1] =
- buf.dst_addr[buf.dst_cnt] + size;
- }
+ /* Get format */
+ fmt = rotator_reg_get_format(rot);
- /* Execute */
- mutex_lock(&rot->exec_mutex);
- rotator_execute(rot, &buf, control, crop);
- if (!wait_for_completion_timeout(&rot->complete, 2 * HZ)) {
- DRM_ERROR("timeout error\n");
- rot->exec_ret = -ETIMEDOUT;
- mutex_unlock(&rot->exec_mutex);
- goto err_iommu_map;
- }
- mutex_unlock(&rot->exec_mutex);
+ /* Re-set cb planar for NV12 format */
+ if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
+ (addr[EXYNOS_DRM_PLANAR_CB] == 0x00)) {
+ /* Get buf size */
+ rotator_reg_get_dst_buf_size(rot, &hsize, &vsize);
- /* Unmap IOMMU */
- rotator_iommu_unmap(&buf, &params);
+ /* Set cb planar */
+ addr[EXYNOS_DRM_PLANAR_CB] =
+ addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
+ }
- return rot->exec_ret;
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++)
+ rotator_reg_set_dst_buf_addr(rot, addr[i], i);
+ break;
+ case IPP_BUF_CTRL_DEQUEUE:
+ for (i = 0; i < EXYNOS_DRM_PLANAR_MAX; i++)
+ rotator_reg_set_dst_buf_addr(rot, buf_info->base[i], i);
+ break;
+ default:
+ /* Nothing to do */
+ break;
+ }
-err_iommu_map:
- rotator_iommu_unmap(&buf, &params);
- return rot->exec_ret;
+ return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_rotator_exec_ioctl);
-static irqreturn_t rotator_irq_thread(int irq, void *arg)
-{
- struct rot_context *rot = (struct rot_context *)arg;
- enum rot_irq_status irq_status;
- unsigned long flags;
+static struct exynos_drm_ipp_ops rot_src_ops = {
+ .set_fmt = rotator_src_set_fmt,
+ .set_size = rotator_src_set_size,
+ .set_addr = rotator_src_set_addr,
+};
- pm_qos_update_request(&rot->pm_qos, 0);
+static struct exynos_drm_ipp_ops rot_dst_ops = {
+ .set_transf = rotator_dst_set_transf,
+ .set_size = rotator_dst_set_size,
+ .set_addr = rotator_dst_set_addr,
+};
- /* Get execution result */
- spin_lock_irqsave(&rot->irq_lock, flags);
- irq_status = rotator_reg_get_irq_status(rot);
- rotator_reg_set_irq_status_clear(rot, irq_status);
- spin_unlock_irqrestore(&rot->irq_lock, flags);
+static int rotator_ippdrv_check_property(struct device *dev,
+ struct drm_exynos_ipp_property *property)
+{
+ struct drm_exynos_ipp_config *src_config =
+ &property->config[EXYNOS_DRM_OPS_SRC];
+ struct drm_exynos_ipp_config *dst_config =
+ &property->config[EXYNOS_DRM_OPS_DST];
+ struct drm_exynos_pos *src_pos = &src_config->pos;
+ struct drm_exynos_pos *dst_pos = &dst_config->pos;
+ struct drm_exynos_sz *src_sz = &src_config->sz;
+ struct drm_exynos_sz *dst_sz = &dst_config->sz;
+ bool swap = false;
+
+ /* Check format configuration */
+ if (src_config->fmt != dst_config->fmt) {
+ DRM_DEBUG_KMS("[%s]not support csc feature\n", __func__);
+ return -EINVAL;
+ }
- rot->exec_ret = 0;
- if (irq_status != ROT_IRQ_STATUS_COMPLETE) {
- DRM_ERROR("the SFR is set illegally\n");
- rot->exec_ret = -EINVAL;
- rotator_reg_get_dump(rot);
+ switch (src_config->fmt) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV12M:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("[%s]not support format\n", __func__);
+ return -EINVAL;
}
- pm_runtime_put(rot->subdrv.dev);
+ /* Check transform configuration */
+ if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
+ DRM_DEBUG_KMS("[%s]not support source-side rotation\n",
+ __func__);
+ return -EINVAL;
+ }
- complete(&rot->complete);
+ switch (dst_config->degree) {
+ case EXYNOS_DRM_DEGREE_90:
+ case EXYNOS_DRM_DEGREE_270:
+ swap = true;
+ case EXYNOS_DRM_DEGREE_0:
+ case EXYNOS_DRM_DEGREE_180:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("[%s]invalid degree\n", __func__);
+ return -EINVAL;
+ }
- return IRQ_HANDLED;
-}
+ if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
+ DRM_DEBUG_KMS("[%s]not support source-side flip\n", __func__);
+ return -EINVAL;
+ }
-static int rotator_subdrv_open(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
-{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_rot_private *priv;
+ switch (dst_config->flip) {
+ case EXYNOS_DRM_FLIP_NONE:
+ case EXYNOS_DRM_FLIP_VERTICAL:
+ case EXYNOS_DRM_FLIP_HORIZONTAL:
+ /* No problem */
+ break;
+ default:
+ DRM_DEBUG_KMS("[%s]invalid flip\n", __func__);
+ return -EINVAL;
+ }
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(dev, "failed to allocate priv\n");
- return -ENOMEM;
+ /* Check size configuration */
+ if ((src_pos->x + src_pos->w > src_sz->hsize) ||
+ (src_pos->y + src_pos->h > src_sz->vsize)) {
+ DRM_DEBUG_KMS("[%s]out of source buffer bound\n", __func__);
+ return -EINVAL;
}
- priv->dev = dev;
- INIT_LIST_HEAD(&priv->iommu_list);
+ if (swap) {
+ if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
+ (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
+ DRM_DEBUG_KMS("[%s]out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
- file_priv->rot_priv = priv;
+ if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
+ DRM_DEBUG_KMS("[%s]not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
+ (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
+ DRM_DEBUG_KMS("[%s]out of destination buffer bound\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
+ DRM_DEBUG_KMS("[%s]not support scale feature\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
return 0;
}
-static void rotator_subdrv_close(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_rot_private *priv = file_priv->rot_priv;
- struct iommu_gem_map_params params;
- struct iommu_info_node *node, *n;
+ struct rot_context *rot = dev_get_drvdata(dev);
- params.dev = dev;
- params.drm_dev = drm_dev;
- params.file = file;
+ if (rot->suspended) {
+ DRM_ERROR("suspended state\n");
+ return -EPERM;
+ }
- list_for_each_entry_safe(node, n, &priv->iommu_list, list) {
- params.gem_obj = node->gem_obj;
- exynos_drm_iommu_unmap_gem(&params, node->dma_addr,
- IOMMU_ROTATOR);
- list_del(&node->list);
- kfree(node);
- node = NULL;
+ if (cmd != IPP_CMD_M2M) {
+ DRM_ERROR("not support cmd: %d\n", cmd);
+ return -EINVAL;
}
- kfree(priv);
+ /* Set interrupt enable */
+ rotator_reg_set_irq(rot, true);
+
+ /* start rotator operation */
+ rotator_reg_set_start(rot);
- return;
+ return 0;
}
static int __devinit rotator_probe(struct platform_device *pdev)
@@ -677,7 +669,7 @@ static int __devinit rotator_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct rot_context *rot;
struct resource *res;
- struct exynos_drm_subdrv *subdrv;
+ struct exynos_drm_ippdrv *ippdrv;
int ret;
rot = kzalloc(sizeof(*rot), GFP_KERNEL);
@@ -689,21 +681,6 @@ static int __devinit rotator_probe(struct platform_device *pdev)
rot->limit_tbl = (struct rot_limit_table *)
platform_get_device_id(pdev)->driver_data;
- mutex_init(&rot->exec_mutex);
- spin_lock_init(&rot->irq_lock);
-
- ret = exynos_drm_iommu_setup(dev);
- if (ret < 0) {
- dev_err(dev, "failed to setup iommu\n");
- goto err_iommu_setup;
- }
-
- ret = exynos_drm_iommu_activate(dev);
- if (ret < 0) {
- dev_err(dev, "failed to activate iommu\n");
- goto err_iommu_activate;
- }
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "failed to find registers\n");
@@ -733,7 +710,7 @@ static int __devinit rotator_probe(struct platform_device *pdev)
goto err_get_irq;
}
- ret = request_threaded_irq(rot->irq, NULL, rotator_irq_thread,
+ ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
IRQF_ONESHOT, "drm_rotator", rot);
if (ret < 0) {
dev_err(dev, "failed to request irq\n");
@@ -748,26 +725,28 @@ static int __devinit rotator_probe(struct platform_device *pdev)
}
pm_runtime_enable(dev);
- pm_qos_add_request(&rot->pm_qos, PM_QOS_BUS_DMA_THROUGHPUT, 0);
- subdrv = &rot->subdrv;
- subdrv->dev = dev;
- subdrv->open = rotator_subdrv_open;
- subdrv->close = rotator_subdrv_close;
+ ippdrv = &rot->ippdrv;
+ ippdrv->dev = dev;
+ ippdrv->iommu_used = true;
+ ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
+ ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
+ ippdrv->check_property = rotator_ippdrv_check_property;
+ ippdrv->start = rotator_ippdrv_start;
platform_set_drvdata(pdev, rot);
- ret = exynos_drm_subdrv_register(subdrv);
+ ret = exynos_drm_ippdrv_register(ippdrv);
if (ret < 0) {
dev_err(dev, "failed to register drm rotator device\n");
- goto err_subdrv_register;
+ goto err_ippdrv_register;
}
dev_info(dev, "The exynos rotator is probed successfully\n");
return 0;
-err_subdrv_register:
+err_ippdrv_register:
pm_runtime_disable(dev);
clk_put(rot->clock);
err_clk_get:
@@ -778,23 +757,18 @@ err_ioremap:
release_resource(rot->regs_res);
kfree(rot->regs_res);
err_get_resource:
- exynos_drm_iommu_deactivate(dev);
-err_iommu_activate:
- exynos_drm_iommu_cleanup(dev);
-err_iommu_setup:
kfree(rot);
return ret;
}
static int __devexit rotator_remove(struct platform_device *pdev)
{
- struct rot_context *rot = platform_get_drvdata(pdev);
-
- pm_qos_remove_request(&rot->pm_qos);
+ struct device *dev = &pdev->dev;
+ struct rot_context *rot = dev_get_drvdata(dev);
- exynos_drm_subdrv_unregister(&rot->subdrv);
+ exynos_drm_ippdrv_unregister(&rot->ippdrv);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
clk_put(rot->clock);
free_irq(rot->irq, rot);
@@ -804,9 +778,6 @@ static int __devexit rotator_remove(struct platform_device *pdev)
release_resource(rot->regs_res);
kfree(rot->regs_res);
- exynos_drm_iommu_deactivate(&pdev->dev);
- exynos_drm_iommu_cleanup(&pdev->dev);
-
kfree(rot);
return 0;
@@ -841,14 +812,13 @@ struct platform_device_id rotator_driver_ids[] = {
static int rotator_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
-
- /* Check & wait for running state */
- mutex_lock(&rot->exec_mutex);
- mutex_unlock(&rot->exec_mutex);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct exynos_drm_private *drm_priv = drm_dev->dev_private;
rot->suspended = true;
- exynos_drm_iommu_deactivate(dev);
+ exynos_drm_iommu_deactivate(drm_priv->vmm, dev);
return 0;
}
@@ -856,12 +826,18 @@ static int rotator_suspend(struct device *dev)
static int rotator_resume(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
+ struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
+ struct drm_device *drm_dev = ippdrv->drm_dev;
+ struct exynos_drm_private *drm_priv = drm_dev->dev_private;
+ int ret;
- rot->suspended = false;
+ ret = exynos_drm_iommu_activate(drm_priv->vmm, dev);
+ if (ret)
+ DRM_ERROR("failed to activate iommu\n");
- exynos_drm_iommu_activate(dev);
+ rot->suspended = false;
- return 0;
+ return ret;
}
#endif
@@ -880,7 +856,6 @@ static int rotator_runtime_resume(struct device *dev)
struct rot_context *rot = dev_get_drvdata(dev);
clk_enable(rot->clock);
- pm_qos_update_request(&rot->pm_qos, 400000);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
index 5f383d5..fe929c9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -10,16 +10,4 @@
#ifndef _EXYNOS_DRM_ROTATOR_H_
#define _EXYNOS_DRM_ROTATOR_H_
-#ifdef CONFIG_DRM_EXYNOS_ROTATOR
-extern int exynos_drm_rotator_exec_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-#else
-static inline int exynos_drm_rotator_exec_ioctl(struct drm_device *dev,
- void *data,
- struct drm_file *file_priv)
-{
- return -ENOTTY;
-}
-#endif
-
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ump.c b/drivers/gpu/drm/exynos/exynos_drm_ump.c
index fd9ba2a..f92759f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ump.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ump.c
@@ -85,7 +85,7 @@ static int exynos_drm_ump_add_buffer(void *obj,
* physically continuous buffer so let a ump descriptor
* have one buffer address.
*/
- ump_mem_desc[0].addr = (unsigned long)buf->dma_addr;
+ ump_mem_desc[0].addr = (unsigned long)buf->paddr;
ump_mem_desc[0].size = buf->size;
}
@@ -115,13 +115,6 @@ static int exynos_drm_ump_add_buffer(void *obj,
static void exynos_drm_ump_release_buffer(unsigned int handle)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (!handle) {
- DRM_DEBUG_KMS("invalid ump handle.\n");
- return;
- }
-
- ump_dd_reference_release((ump_dd_handle)handle);
}
static struct exynos_drm_private_cb ump_callback = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 876e460..44bcdc8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -468,7 +468,7 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
return 0;
}
-static void vidi_subdrv_remove(struct drm_device *drm_dev)
+static void vidi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -549,6 +549,8 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct exynos_drm_manager *manager;
struct exynos_drm_display_ops *display_ops;
struct drm_exynos_vidi_connection *vidi = data;
+ struct edid *raw_edid;
+ int edid_len;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -557,11 +559,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- if (!vidi->edid) {
- DRM_DEBUG_KMS("edid data is null.\n");
- return -EINVAL;
- }
-
if (vidi->connection > 1) {
DRM_DEBUG_KMS("connection should be 0 or 1.\n");
return -EINVAL;
@@ -588,8 +585,23 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
return -EINVAL;
}
- if (vidi->connection)
- ctx->raw_edid = (struct edid *)vidi->edid;
+ if (vidi->connection) {
+ if (!vidi->edid) {
+ DRM_DEBUG_KMS("edid data is null.\n");
+ return -EINVAL;
+ }
+ raw_edid = (struct edid *)vidi->edid;
+ edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
+ ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
+ if (!ctx->raw_edid) {
+ DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+ return -ENOMEM;
+ }
+ memcpy(ctx->raw_edid, raw_edid, edid_len);
+ } else {
+ kfree(ctx->raw_edid);
+ ctx->raw_edid = NULL;
+ }
ctx->connected = vidi->connection;
drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);
@@ -644,6 +656,7 @@ static int __devexit vidi_remove(struct platform_device *pdev)
exynos_drm_subdrv_unregister(&ctx->subdrv);
+ kfree(ctx->raw_edid);
kfree(ctx);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 0c44cb7..48d7f98 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -37,6 +37,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_hdmi.h"
+#include "exynos_drm_iommu.h"
#include "exynos_hdmi.h"
@@ -61,6 +62,7 @@ struct hdmi_context {
bool powered;
bool is_v13;
bool dvi_mode;
+ bool iommu_on;
struct mutex hdmi_mutex;
struct resource *regs_res;
@@ -927,8 +929,6 @@ static const struct hdmi_conf hdmi_confs[] = {
{ 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 },
{ 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 },
{ 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
- { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
- { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
{ 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
};
@@ -1838,6 +1838,104 @@ static void hdmi_timing_apply(struct hdmi_context *hdata)
hdmi_v14_timing_apply(hdata);
}
+static int hdmi_phy_ctrl(struct i2c_client *client, u8 reg, u8 bit,
+ u8 *read_buffer, bool enable)
+{
+ int ret;
+ u8 operation[2];
+
+ operation[0] = reg;
+ operation[1] = enable ? (read_buffer[reg] & (~(1 << bit))) :
+ (read_buffer[reg] | (1 << bit));
+ read_buffer[reg] = operation[1];
+
+ ret = i2c_master_send(client, operation, 2);
+ if (ret != 2) {
+ DRM_ERROR("failed to turn %s HDMIPHY via I2C\n",
+ enable ? "enable" : "disable");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hdmi_phy_power_ctrl(struct hdmi_context *hdata, bool enable)
+{
+ struct hdmi_resources *res = &hdata->res;
+ u8 operation[2];
+ u8 read_buffer[32];
+ int ret = 0, i;
+
+ DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
+
+ clk_enable(res->hdmiphy);
+
+ /* read full register */
+ operation[0] = 0x1;
+ i2c_master_send(hdata->hdmiphy_port, operation, 1);
+
+ memset(read_buffer, 0x0, sizeof(read_buffer));
+ ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+ if (ret < 0) {
+ DRM_ERROR("failed to read hdmiphy config\n");
+ goto err_clear;
+ }
+
+ for (i = 0; i < ret; i++)
+ DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+ "recv [0x%02x]\n", i, operation[i], read_buffer[i]);
+
+ /* ocspad control */
+ operation[0] = 0x0b;
+ if (enable)
+ operation[1] = 0xd8;
+ else
+ operation[1] = 0x18;
+ read_buffer[0x0b] = operation[1];
+
+ ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
+ if (ret != 2) {
+ DRM_ERROR("failed to %s osc pad\n",
+ enable ? "enable" : "disable");
+ goto err_clear;
+ }
+
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x1d, 0x7, read_buffer, enable);
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x1d, 0x0, read_buffer, enable);
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x1d, 0x1, read_buffer, enable);
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x1d, 0x2, read_buffer, enable);
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x1d, 0x4, read_buffer, enable);
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x1d, 0x5, read_buffer, enable);
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x1d, 0x6, read_buffer, enable);
+
+ if (!enable)
+ hdmi_phy_ctrl(hdata->hdmiphy_port, 0x4, 0x3, read_buffer, 0);
+
+ /* read full register */
+ operation[0] = 0x1;
+ i2c_master_send(hdata->hdmiphy_port, operation, 1);
+
+ memset(read_buffer, 0x0, sizeof(read_buffer));
+ ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+ if (ret < 0) {
+ DRM_ERROR("failed to read hdmiphy config\n");
+ goto err_clear;
+ }
+
+ for (i = 0; i < ret; i++)
+ DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+ "recv [0x%02x]\n", i, operation[i], read_buffer[i]);
+
+ clk_disable(res->hdmiphy);
+
+ return 0;
+
+err_clear:
+ clk_disable(res->hdmiphy);
+
+ return ret;
+}
+
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
u8 buffer[2];
@@ -2003,15 +2101,46 @@ static void hdmi_get_max_resol(void *ctx, unsigned int *width,
static void hdmi_commit(void *ctx)
{
struct hdmi_context *hdata = ctx;
+ struct exynos_drm_private *drm_priv;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct drm_device *drm_dev;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
hdmi_conf_apply(hdata);
+
+ /*
+ * parent_ctx is created at hdmi_probe() and
+ * parent_ctx->drm_dev is set at hdmi_subdrv_probe()
+ */
+ drm_hdmi_ctx = hdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
+ if (drm_dev)
+ drm_priv = drm_dev->dev_private;
+ else
+ return;
+
+ /*
+ * if iommu support for exynos drm was enabled, this function is
+ * called first time(!hdata->iommu_on) then enable iommu unit.
+ */
+ if (drm_priv->vmm && !hdata->iommu_on) {
+ int ret;
+
+ ret = exynos_drm_iommu_activate(drm_priv->vmm, hdata->dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to activate iommu.\n");
+ return;
+ }
+
+ hdata->iommu_on = true;
+ }
}
static void hdmi_poweron(struct hdmi_context *hdata)
{
struct hdmi_resources *res = &hdata->res;
+ int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -2029,6 +2158,32 @@ static void hdmi_poweron(struct hdmi_context *hdata)
pm_runtime_get_sync(hdata->dev);
+ ret = hdmi_phy_power_ctrl(hdata, true);
+ if (ret) {
+ DRM_ERROR("failed to control phy power\n");
+ return;
+ }
+
+ if (hdata->iommu_on) {
+ struct exynos_drm_private *drm_priv;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct drm_device *drm_dev;
+
+ drm_hdmi_ctx = hdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
+
+ if (drm_dev)
+ drm_priv = drm_dev->dev_private;
+ else
+ return;
+
+ ret = exynos_drm_iommu_activate(drm_priv->vmm, hdata->dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to activate iommu.\n");
+ return;
+ }
+ }
+
regulator_bulk_enable(res->regul_count, res->regul_bulk);
clk_enable(res->hdmiphy);
clk_enable(res->hdmi);
@@ -2038,6 +2193,7 @@ static void hdmi_poweron(struct hdmi_context *hdata)
static void hdmi_poweroff(struct hdmi_context *hdata)
{
struct hdmi_resources *res = &hdata->res;
+ int ret;
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -2057,16 +2213,48 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
clk_disable(res->hdmiphy);
regulator_bulk_disable(res->regul_count, res->regul_bulk);
- pm_runtime_put_sync(hdata->dev);
-
mutex_lock(&hdata->hdmi_mutex);
if (hdata->cfg_hpd)
hdata->cfg_hpd(false);
+ ret = hdmi_phy_power_ctrl(hdata, false);
+ if (ret) {
+ DRM_ERROR("failed to control phy power\n");
+ return;
+ }
+
+ if (hdata->iommu_on) {
+ struct exynos_drm_private *drm_priv;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct drm_device *drm_dev;
+
+ drm_hdmi_ctx = hdata->parent_ctx;
+ drm_dev = drm_hdmi_ctx->drm_dev;
+ if (drm_dev)
+ drm_priv = drm_dev->dev_private;
+ else {
+ if (hdata->cfg_hpd)
+ hdata->cfg_hpd(true);
+
+ regulator_bulk_enable(res->regul_count,
+ res->regul_bulk);
+
+ clk_enable(res->hdmiphy);
+ clk_enable(res->hdmi);
+ clk_enable(res->sclk_hdmi);
+
+ mutex_unlock(&hdata->hdmi_mutex);
+ return;
+ }
+
+ exynos_drm_iommu_deactivate(drm_priv->vmm, hdata->dev);
+ }
+
hdata->powered = false;
out:
mutex_unlock(&hdata->hdmi_mutex);
+ pm_runtime_put_sync(hdata->dev);
}
static void hdmi_dpms(void *ctx, int mode)
@@ -2350,6 +2538,17 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
hdata->hdmiphy_port = hdmi_hdmiphy;
+ /*
+ * HDMI PHY power off
+ * HDMI PHY is on as default configuration
+ * So, HDMI PHY must be turned off if it's not used
+ */
+ ret = hdmi_phy_power_ctrl(hdata, false);
+ if (ret) {
+ DRM_ERROR("failed to control phy power\n");
+ goto err_hdmiphy;
+ }
+
hdata->external_irq = platform_get_irq_byname(pdev, "external_irq");
if (hdata->external_irq < 0) {
DRM_ERROR("failed to get platform irq\n");
@@ -2435,6 +2634,7 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
i2c_del_driver(&ddc_driver);
kfree(hdata);
+ kfree(ctx);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 68ef010..206cbbc 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx)
mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
/* setting graphical layers */
-
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
/* the same configuration for both layers */
mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
- val |= MXR_GRP_CFG_BLEND_PRE_MUL;
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+ /* setting video layers */
+ val = MXR_GRP_CFG_ALPHA_VAL(0);
+ mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(res);
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 0000000..be014b3
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
+/* drivers/gpu/drm/exynos/regs-fimc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Register definition file for Samsung Camera Interface (FIMC) driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_FIMC_H
+#define SAMSUNG_REGS_FIMC_H
+
+/*
+ * Register part
+*/
+/* Input source format */
+#define EXYNOS_CISRCFMT (0x00)
+/* Window offset */
+#define EXYNOS_CIWDOFST (0x04)
+/* Global control */
+#define EXYNOS_CIGCTRL (0x08)
+/* Window offset 2 */
+#define EXYNOS_CIWDOFST2 (0x14)
+/* Y 1st frame start address for output DMA */
+#define EXYNOS_CIOYSA1 (0x18)
+/* Y 2nd frame start address for output DMA */
+#define EXYNOS_CIOYSA2 (0x1c)
+/* Y 3rd frame start address for output DMA */
+#define EXYNOS_CIOYSA3 (0x20)
+/* Y 4th frame start address for output DMA */
+#define EXYNOS_CIOYSA4 (0x24)
+/* Cb 1st frame start address for output DMA */
+#define EXYNOS_CIOCBSA1 (0x28)
+/* Cb 2nd frame start address for output DMA */
+#define EXYNOS_CIOCBSA2 (0x2c)
+/* Cb 3rd frame start address for output DMA */
+#define EXYNOS_CIOCBSA3 (0x30)
+/* Cb 4th frame start address for output DMA */
+#define EXYNOS_CIOCBSA4 (0x34)
+/* Cr 1st frame start address for output DMA */
+#define EXYNOS_CIOCRSA1 (0x38)
+/* Cr 2nd frame start address for output DMA */
+#define EXYNOS_CIOCRSA2 (0x3c)
+/* Cr 3rd frame start address for output DMA */
+#define EXYNOS_CIOCRSA3 (0x40)
+/* Cr 4th frame start address for output DMA */
+#define EXYNOS_CIOCRSA4 (0x44)
+/* Target image format */
+#define EXYNOS_CITRGFMT (0x48)
+/* Output DMA control */
+#define EXYNOS_CIOCTRL (0x4c)
+/* Pre-scaler control 1 */
+#define EXYNOS_CISCPRERATIO (0x50)
+/* Pre-scaler control 2 */
+#define EXYNOS_CISCPREDST (0x54)
+/* Main scaler control */
+#define EXYNOS_CISCCTRL (0x58)
+/* Target area */
+#define EXYNOS_CITAREA (0x5c)
+/* Status */
+#define EXYNOS_CISTATUS (0x64)
+/* Status2 */
+#define EXYNOS_CISTATUS2 (0x68)
+/* Image capture enable command */
+#define EXYNOS_CIIMGCPT (0xc0)
+/* Capture sequence */
+#define EXYNOS_CICPTSEQ (0xc4)
+/* Image effects */
+#define EXYNOS_CIIMGEFF (0xd0)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA0 (0xd4)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA0 (0xd8)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA0 (0xdc)
+/* Input DMA Y Line Skip */
+#define EXYNOS_CIILINESKIP_Y (0xec)
+/* Input DMA Cb Line Skip */
+#define EXYNOS_CIILINESKIP_CB (0xf0)
+/* Input DMA Cr Line Skip */
+#define EXYNOS_CIILINESKIP_CR (0xf4)
+/* Real input DMA image size */
+#define EXYNOS_CIREAL_ISIZE (0xf8)
+/* Input DMA control */
+#define EXYNOS_MSCTRL (0xfc)
+/* Y frame start address for input DMA */
+#define EXYNOS_CIIYSA1 (0x144)
+/* Cb frame start address for input DMA */
+#define EXYNOS_CIICBSA1 (0x148)
+/* Cr frame start address for input DMA */
+#define EXYNOS_CIICRSA1 (0x14c)
+/* Output DMA Y offset */
+#define EXYNOS_CIOYOFF (0x168)
+/* Output DMA CB offset */
+#define EXYNOS_CIOCBOFF (0x16c)
+/* Output DMA CR offset */
+#define EXYNOS_CIOCROFF (0x170)
+/* Input DMA Y offset */
+#define EXYNOS_CIIYOFF (0x174)
+/* Input DMA CB offset */
+#define EXYNOS_CIICBOFF (0x178)
+/* Input DMA CR offset */
+#define EXYNOS_CIICROFF (0x17c)
+/* Input DMA original image size */
+#define EXYNOS_ORGISIZE (0x180)
+/* Output DMA original image size */
+#define EXYNOS_ORGOSIZE (0x184)
+/* Real output DMA image size */
+#define EXYNOS_CIEXTEN (0x188)
+/* DMA parameter */
+#define EXYNOS_CIDMAPARAM (0x18c)
+/* MIPI CSI image format */
+#define EXYNOS_CSIIMGFMT (0x194)
+/* FIMC Clock Source Select */
+#define EXYNOS_MISC_FIMC (0x198)
+
+/* Add for FIMC v5.1 */
+/* Output Frame Buffer Sequence */
+#define EXYNOS_CIFCNTSEQ (0x1fc)
+/* Y 5th frame start address for output DMA */
+#define EXYNOS_CIOYSA5 (0x200)
+/* Y 6th frame start address for output DMA */
+#define EXYNOS_CIOYSA6 (0x204)
+/* Y 7th frame start address for output DMA */
+#define EXYNOS_CIOYSA7 (0x208)
+/* Y 8th frame start address for output DMA */
+#define EXYNOS_CIOYSA8 (0x20c)
+/* Y 9th frame start address for output DMA */
+#define EXYNOS_CIOYSA9 (0x210)
+/* Y 10th frame start address for output DMA */
+#define EXYNOS_CIOYSA10 (0x214)
+/* Y 11th frame start address for output DMA */
+#define EXYNOS_CIOYSA11 (0x218)
+/* Y 12th frame start address for output DMA */
+#define EXYNOS_CIOYSA12 (0x21c)
+/* Y 13th frame start address for output DMA */
+#define EXYNOS_CIOYSA13 (0x220)
+/* Y 14th frame start address for output DMA */
+#define EXYNOS_CIOYSA14 (0x224)
+/* Y 15th frame start address for output DMA */
+#define EXYNOS_CIOYSA15 (0x228)
+/* Y 16th frame start address for output DMA */
+#define EXYNOS_CIOYSA16 (0x22c)
+/* Y 17th frame start address for output DMA */
+#define EXYNOS_CIOYSA17 (0x230)
+/* Y 18th frame start address for output DMA */
+#define EXYNOS_CIOYSA18 (0x234)
+/* Y 19th frame start address for output DMA */
+#define EXYNOS_CIOYSA19 (0x238)
+/* Y 20th frame start address for output DMA */
+#define EXYNOS_CIOYSA20 (0x23c)
+/* Y 21th frame start address for output DMA */
+#define EXYNOS_CIOYSA21 (0x240)
+/* Y 22th frame start address for output DMA */
+#define EXYNOS_CIOYSA22 (0x244)
+/* Y 23th frame start address for output DMA */
+#define EXYNOS_CIOYSA23 (0x248)
+/* Y 24th frame start address for output DMA */
+#define EXYNOS_CIOYSA24 (0x24c)
+/* Y 25th frame start address for output DMA */
+#define EXYNOS_CIOYSA25 (0x250)
+/* Y 26th frame start address for output DMA */
+#define EXYNOS_CIOYSA26 (0x254)
+/* Y 27th frame start address for output DMA */
+#define EXYNOS_CIOYSA27 (0x258)
+/* Y 28th frame start address for output DMA */
+#define EXYNOS_CIOYSA28 (0x25c)
+/* Y 29th frame start address for output DMA */
+#define EXYNOS_CIOYSA29 (0x260)
+/* Y 30th frame start address for output DMA */
+#define EXYNOS_CIOYSA30 (0x264)
+/* Y 31th frame start address for output DMA */
+#define EXYNOS_CIOYSA31 (0x268)
+/* Y 32th frame start address for output DMA */
+#define EXYNOS_CIOYSA32 (0x26c)
+
+/* CB 5th frame start address for output DMA */
+#define EXYNOS_CIOCBSA5 (0x270)
+/* CB 6th frame start address for output DMA */
+#define EXYNOS_CIOCBSA6 (0x274)
+/* CB 7th frame start address for output DMA */
+#define EXYNOS_CIOCBSA7 (0x278)
+/* CB 8th frame start address for output DMA */
+#define EXYNOS_CIOCBSA8 (0x27c)
+/* CB 9th frame start address for output DMA */
+#define EXYNOS_CIOCBSA9 (0x280)
+/* CB 10th frame start address for output DMA */
+#define EXYNOS_CIOCBSA10 (0x284)
+/* CB 11th frame start address for output DMA */
+#define EXYNOS_CIOCBSA11 (0x288)
+/* CB 12th frame start address for output DMA */
+#define EXYNOS_CIOCBSA12 (0x28c)
+/* CB 13th frame start address for output DMA */
+#define EXYNOS_CIOCBSA13 (0x290)
+/* CB 14th frame start address for output DMA */
+#define EXYNOS_CIOCBSA14 (0x294)
+/* CB 15th frame start address for output DMA */
+#define EXYNOS_CIOCBSA15 (0x298)
+/* CB 16th frame start address for output DMA */
+#define EXYNOS_CIOCBSA16 (0x29c)
+/* CB 17th frame start address for output DMA */
+#define EXYNOS_CIOCBSA17 (0x2a0)
+/* CB 18th frame start address for output DMA */
+#define EXYNOS_CIOCBSA18 (0x2a4)
+/* CB 19th frame start address for output DMA */
+#define EXYNOS_CIOCBSA19 (0x2a8)
+/* CB 20th frame start address for output DMA */
+#define EXYNOS_CIOCBSA20 (0x2ac)
+/* CB 21th frame start address for output DMA */
+#define EXYNOS_CIOCBSA21 (0x2b0)
+/* CB 22th frame start address for output DMA */
+#define EXYNOS_CIOCBSA22 (0x2b4)
+/* CB 23th frame start address for output DMA */
+#define EXYNOS_CIOCBSA23 (0x2b8)
+/* CB 24th frame start address for output DMA */
+#define EXYNOS_CIOCBSA24 (0x2bc)
+/* CB 25th frame start address for output DMA */
+#define EXYNOS_CIOCBSA25 (0x2c0)
+/* CB 26th frame start address for output DMA */
+#define EXYNOS_CIOCBSA26 (0x2c4)
+/* CB 27th frame start address for output DMA */
+#define EXYNOS_CIOCBSA27 (0x2c8)
+/* CB 28th frame start address for output DMA */
+#define EXYNOS_CIOCBSA28 (0x2cc)
+/* CB 29th frame start address for output DMA */
+#define EXYNOS_CIOCBSA29 (0x2d0)
+/* CB 30th frame start address for output DMA */
+#define EXYNOS_CIOCBSA30 (0x2d4)
+/* CB 31th frame start address for output DMA */
+#define EXYNOS_CIOCBSA31 (0x2d8)
+/* CB 32th frame start address for output DMA */
+#define EXYNOS_CIOCBSA32 (0x2dc)
+
+/* CR 5th frame start address for output DMA */
+#define EXYNOS_CIOCRSA5 (0x2e0)
+/* CR 6th frame start address for output DMA */
+#define EXYNOS_CIOCRSA6 (0x2e4)
+/* CR 7th frame start address for output DMA */
+#define EXYNOS_CIOCRSA7 (0x2e8)
+/* CR 8th frame start address for output DMA */
+#define EXYNOS_CIOCRSA8 (0x2ec)
+/* CR 9th frame start address for output DMA */
+#define EXYNOS_CIOCRSA9 (0x2f0)
+/* CR 10th frame start address for output DMA */
+#define EXYNOS_CIOCRSA10 (0x2f4)
+/* CR 11th frame start address for output DMA */
+#define EXYNOS_CIOCRSA11 (0x2f8)
+/* CR 12th frame start address for output DMA */
+#define EXYNOS_CIOCRSA12 (0x2fc)
+/* CR 13th frame start address for output DMA */
+#define EXYNOS_CIOCRSA13 (0x300)
+/* CR 14th frame start address for output DMA */
+#define EXYNOS_CIOCRSA14 (0x304)
+/* CR 15th frame start address for output DMA */
+#define EXYNOS_CIOCRSA15 (0x308)
+/* CR 16th frame start address for output DMA */
+#define EXYNOS_CIOCRSA16 (0x30c)
+/* CR 17th frame start address for output DMA */
+#define EXYNOS_CIOCRSA17 (0x310)
+/* CR 18th frame start address for output DMA */
+#define EXYNOS_CIOCRSA18 (0x314)
+/* CR 19th frame start address for output DMA */
+#define EXYNOS_CIOCRSA19 (0x318)
+/* CR 20th frame start address for output DMA */
+#define EXYNOS_CIOCRSA20 (0x31c)
+/* CR 21th frame start address for output DMA */
+#define EXYNOS_CIOCRSA21 (0x320)
+/* CR 22th frame start address for output DMA */
+#define EXYNOS_CIOCRSA22 (0x324)
+/* CR 23th frame start address for output DMA */
+#define EXYNOS_CIOCRSA23 (0x328)
+/* CR 24th frame start address for output DMA */
+#define EXYNOS_CIOCRSA24 (0x32c)
+/* CR 25th frame start address for output DMA */
+#define EXYNOS_CIOCRSA25 (0x330)
+/* CR 26th frame start address for output DMA */
+#define EXYNOS_CIOCRSA26 (0x334)
+/* CR 27th frame start address for output DMA */
+#define EXYNOS_CIOCRSA27 (0x338)
+/* CR 28th frame start address for output DMA */
+#define EXYNOS_CIOCRSA28 (0x33c)
+/* CR 29th frame start address for output DMA */
+#define EXYNOS_CIOCRSA29 (0x340)
+/* CR 30th frame start address for output DMA */
+#define EXYNOS_CIOCRSA30 (0x344)
+/* CR 31th frame start address for output DMA */
+#define EXYNOS_CIOCRSA31 (0x348)
+/* CR 32th frame start address for output DMA */
+#define EXYNOS_CIOCRSA32 (0x34c)
+
+/*
+ * Macro part
+*/
+/* frame start address 1 ~ 4, 5 ~ 32 */
+/* Number of Default PingPong Memory */
+#define DEF_PP 4
+#define EXYNOS_CIOYSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOYSA1 + (__x) * 4) : \
+ (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCBSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCBSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
+#define EXYNOS_CIOCRSA(__x) \
+ (((__x) < DEF_PP) ? \
+ (EXYNOS_CIOCRSA1 + (__x) * 4) : \
+ (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
+/* Number of Default PingPong Memory */
+#define DEF_IPP 1
+#define EXYNOS_CIIYSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
+#define EXYNOS_CIICBSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
+#define EXYNOS_CIICRSA(__x) \
+ (((__x) < DEF_IPP) ? \
+ (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
+
+#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
+#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
+
+#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
+#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
+
+#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
+#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
+
+#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
+#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
+#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
+
+#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
+#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
+
+#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
+
+#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
+#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
+#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
+#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
+#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
+
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
+#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
+
+#define EXYNOS_CIIMGEFF_FIN(x)			(((x) & 0x7) << 26)
+#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
+#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
+
+#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
+
+#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
+
+#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
+#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
+
+#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
+#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
+#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
+
+#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
+
+/*
+ * Bit definition part
+*/
+/* Source format register */
+#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
+#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
+#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
+#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
+#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
+/* ITU601 16bit only */
+#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
+
+/* Window offset register */
+#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
+#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
+#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
+#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
+#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
+#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
+#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
+
+/* Global control register */
+#define EXYNOS_CIGCTRL_SWRST (1 << 31)
+#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
+#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
+#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
+#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
+#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
+#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
+#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
+#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
+#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
+#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
+#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
+#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
+#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
+#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
+#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
+#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
+#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
+#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
+#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
+#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
+#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
+#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
+#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
+#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
+#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
+#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
+#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
+
+/* Window offset2 register */
+#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
+#define EXYNOS_CIWDOFST_WINVEROFST2_MASK	(0xfff << 0)
+
+/* Target format register */
+#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
+#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
+#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
+#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
+#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
+#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
+#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
+#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
+#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
+#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
+#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
+
+/* Output DMA control register */
+#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
+#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
+#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
+#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
+#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
+#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
+#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
+#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
+#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
+#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
+#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
+
+/* Main scaler control register */
+#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
+#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
+#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
+#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
+#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
+#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
+#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
+#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
+#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
+#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
+#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
+#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
+#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
+#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
+#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
+#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
+#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
+#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
+#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
+
+/* Status register */
+#define EXYNOS_CISTATUS_OVFIY (1 << 31)
+#define EXYNOS_CISTATUS_OVFICB (1 << 30)
+#define EXYNOS_CISTATUS_OVFICR (1 << 29)
+#define EXYNOS_CISTATUS_VSYNC (1 << 28)
+#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
+#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
+#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
+#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
+#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
+#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
+#define EXYNOS_CISTATUS_OVRLB (1 << 18)
+#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
+#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
+#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
+#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
+
+/* Image capture enable register */
+#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
+#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
+#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
+#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
+
+/* Image effects register */
+#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
+#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
+#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
+#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
+#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
+#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
+#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
+#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
+#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
+#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK		((0xff << 13) | (0xff << 0))
+
+/* Real input DMA size register */
+#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
+#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
+#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
+#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
+
+/* Input DMA control register */
+#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
+#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
+#define EXYNOS_MSCTRL_BURST_CNT (24)
+#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
+#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
+#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
+#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
+#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
+#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
+#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
+#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
+#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
+#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
+#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
+#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
+#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
+#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
+#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
+#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
+#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
+#define EXYNOS_MSCTRL_ENVID (1 << 0)
+
+/* DMA parameter register */
+#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
+#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
+#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
+#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
+
+/* Gathering Extension register */
+#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
+#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
+#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
+#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
+#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
+
+/* FIMC Clock Source Select register */
+#define EXYNOS_CLKSRC_HCLK (0 << 1)
+#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
+#define EXYNOS_CLKSRC_SCLK (1 << 1)
+
+/* SYSREG for FIMC writeback */
+#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
+#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
+#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
+#define SYSREG_FIMD0WB_DEST_SHIFT 23
+
+#endif /* SAMSUNG_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 0000000..8ec160b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,295 @@
+/* linux/drivers/gpu/drm/exynos/regs-gsc.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Register definition file for Samsung G-Scaler driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef REGS_GSC_H_
+#define REGS_GSC_H_
+
+/*
+ * SYSCON GSCBLK_CFG: system-controller bits that route G-Scaler local
+ * paths and writeback.  These are absolute virtual addresses through the
+ * static S3C_VA_SYS mapping, not offsets into the G-Scaler register block.
+ * All function-like macros parenthesize their parameter so that callers
+ * may pass arbitrary expressions (CERT PRE01-C).
+ */
+#include <plat/map-base.h>
+#include <plat/cpu.h>
+#define SYSREG_DISP1BLK_CFG (S3C_VA_SYS + 0x0214)
+#define FIFORST_DISP1 (1 << 23)
+#define GSC_OUT_MIXER0 (1 << 7)
+#define GSC_OUT_MIXER0_GSC3 (3 << 5)
+#define SYSREG_GSCBLK_CFG0 (S3C_VA_SYS + 0x0220)
+#define GSC_OUT_DST_FIMD_SEL(x) (1 << (8 + 2 * (x)))
+#define GSC_OUT_DST_MXR_SEL(x) (2 << (8 + 2 * (x)))
+#define GSC_PXLASYNC_RST(x) (1 << (x))
+#define PXLASYNC_LO_MASK_CAMIF_TOP (1 << 20)
+#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
+#define GSC_BLK_DISP1WB_DEST(x) ((x) << 10)
+#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + (x)))
+/* NOTE(review): expands to 0 for every x — usable only to clear the bit; confirm intent */
+#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + (x)))
+#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * (x)))
+#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
+#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
+
+/* G-Scaler enable (GSC_ENABLE, offset 0x00): run control, SFR/per-frame update and clock-gate mode bits */
+#define GSC_ENABLE 0x00
+#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
+#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
+#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
+#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
+#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
+#define GSC_ENABLE_NORM_MODE (0 << 7)
+#define GSC_ENABLE_IPC_MODE (1 << 7)
+#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
+#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
+#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
+#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
+#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
+#define GSC_ENABLE_QOS_ENABLE (1 << 3)
+#define GSC_ENABLE_OP_STATUS (1 << 2)
+#define GSC_ENABLE_SFR_UPDATE (1 << 1)
+#define GSC_ENABLE_ON (1 << 0)
+
+/* G-Scaler S/W reset (GSC_SW_RESET, offset 0x04) */
+#define GSC_SW_RESET 0x04
+#define GSC_SW_RESET_SRESET (1 << 0)
+
+/* G-Scaler IRQ (GSC_IRQ, offset 0x08): STATUS_* bits report state, *_MASK/ENABLE bits control delivery */
+#define GSC_IRQ 0x08
+#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
+#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
+#define GSC_IRQ_OR_MASK (1 << 2)
+#define GSC_IRQ_FRMDONE_MASK (1 << 1)
+#define GSC_IRQ_ENABLE (1 << 0)
+
+/* G-Scaler input control (GSC_IN_CON, offset 0x10): source format, rotation/flip, chroma order and input path */
+#define GSC_IN_CON 0x10
+#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
+#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
+#define GSC_IN_RB_SWAP_MASK (1 << 19)
+#define GSC_IN_RB_SWAP (1 << 19)
+#define GSC_IN_ROT_MASK (7 << 16)
+#define GSC_IN_ROT_270 (7 << 16)
+#define GSC_IN_ROT_90_YFLIP (6 << 16)
+#define GSC_IN_ROT_90_XFLIP (5 << 16)
+#define GSC_IN_ROT_90 (4 << 16)
+#define GSC_IN_ROT_180 (3 << 16)
+#define GSC_IN_ROT_YFLIP (2 << 16)
+#define GSC_IN_ROT_XFLIP (1 << 16)
+#define GSC_IN_RGB_TYPE_MASK (3 << 14)
+#define GSC_IN_RGB_HD_WIDE (3 << 14)
+#define GSC_IN_RGB_HD_NARROW (2 << 14)
+#define GSC_IN_RGB_SD_WIDE (1 << 14)
+#define GSC_IN_RGB_SD_NARROW (0 << 14)
+#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
+#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
+#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13) /* NOTE(review): "OEDER" typo for ORDER — kept, renaming would break users of this name */
+#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
+#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
+#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
+#define GSC_IN_FORMAT_MASK (7 << 8)
+#define GSC_IN_XRGB8888 (0 << 8)
+#define GSC_IN_RGB565 (1 << 8)
+#define GSC_IN_YUV420_2P (2 << 8)
+#define GSC_IN_YUV420_3P (3 << 8)
+#define GSC_IN_YUV422_1P (4 << 8)
+#define GSC_IN_YUV422_2P (5 << 8)
+#define GSC_IN_YUV422_3P (6 << 8)
+#define GSC_IN_TILE_TYPE_MASK (1 << 4)
+#define GSC_IN_TILE_C_16x8 (0 << 4)
+#define GSC_IN_TILE_C_16x16 (1 << 4)
+#define GSC_IN_TILE_MODE (1 << 3)
+#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
+#define GSC_IN_LOCAL_CAM3 (3 << 1)
+#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
+#define GSC_IN_LOCAL_CAM1 (1 << 1)
+#define GSC_IN_LOCAL_CAM0 (0 << 1)
+#define GSC_IN_PATH_MASK (1 << 0)
+#define GSC_IN_PATH_LOCAL (1 << 0)
+#define GSC_IN_PATH_MEMORY (0 << 0)
+
+/* G-Scaler source image size (GSC_SRCIMG_SIZE, offset 0x14): height [28:16], width [13:0] */
+#define GSC_SRCIMG_SIZE 0x14
+#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
+#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
+#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler source image offset (GSC_SRCIMG_OFFSET, offset 0x18): crop origin within the source image */
+#define GSC_SRCIMG_OFFSET 0x18
+#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler cropped source image size (GSC_CROPPED_SIZE, offset 0x1C) */
+#define GSC_CROPPED_SIZE 0x1C
+#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
+#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
+#define GSC_CROPPED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler output control (GSC_OUT_CON, offset 0x20): destination format, global alpha and output path */
+#define GSC_OUT_CON 0x20
+#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
+#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
+#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
+#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
+#define GSC_OUT_RB_SWAP_MASK (1 << 12)
+#define GSC_OUT_RB_SWAP (1 << 12)
+#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
+#define GSC_OUT_RGB_HD_NARROW (3 << 10) /* NOTE(review): wide/narrow encoding is inverted vs GSC_IN_RGB_* — confirm against datasheet */
+#define GSC_OUT_RGB_HD_WIDE (2 << 10)
+#define GSC_OUT_RGB_SD_NARROW (1 << 10)
+#define GSC_OUT_RGB_SD_WIDE (0 << 10)
+#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
+#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
+#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9) /* NOTE(review): "OEDER" typo for ORDER — kept, renaming would break users of this name */
+#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
+#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
+#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
+#define GSC_OUT_FORMAT_MASK (7 << 4)
+#define GSC_OUT_XRGB8888 (0 << 4)
+#define GSC_OUT_RGB565 (1 << 4)
+#define GSC_OUT_YUV420_2P (2 << 4)
+#define GSC_OUT_YUV420_3P (3 << 4)
+#define GSC_OUT_YUV422_1P (4 << 4)
+#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV444 (7 << 4)
+#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+#define GSC_OUT_TILE_C_16x8 (0 << 2)
+#define GSC_OUT_TILE_C_16x16 (1 << 2)
+#define GSC_OUT_TILE_MODE (1 << 1)
+#define GSC_OUT_PATH_MASK (1 << 0)
+#define GSC_OUT_PATH_LOCAL (1 << 0)
+#define GSC_OUT_PATH_MEMORY (0 << 0)
+
+/* G-Scaler scaled destination image size (GSC_SCALED_SIZE, offset 0x24) */
+#define GSC_SCALED_SIZE 0x24
+#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
+#define GSC_SCALED_HEIGHT(x) ((x) << 16)
+#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
+#define GSC_SCALED_WIDTH(x) ((x) << 0)
+
+/* G-Scaler pre scale ratio (GSC_PRE_SCALE_RATIO, offset 0x28): shift factor plus H/V pre-decimation ratios */
+#define GSC_PRE_SCALE_RATIO 0x28
+#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
+#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
+#define GSC_PRESC_V_RATIO_MASK (7 << 16)
+#define GSC_PRESC_V_RATIO(x) ((x) << 16)
+#define GSC_PRESC_H_RATIO_MASK (7 << 0)
+#define GSC_PRESC_H_RATIO(x) ((x) << 0)
+
+/* G-Scaler main scale horizontal ratio (GSC_MAIN_H_RATIO, offset 0x2C): 20-bit fixed-point ratio */
+#define GSC_MAIN_H_RATIO 0x2C
+#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler main scale vertical ratio (GSC_MAIN_V_RATIO, offset 0x30): 20-bit fixed-point ratio */
+#define GSC_MAIN_V_RATIO 0x30
+#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
+#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
+
+/* G-Scaler input chrominance stride (GSC_IN_CHROM_STRIDE, offset 0x3C): used when chroma stride is separate */
+#define GSC_IN_CHROM_STRIDE 0x3C
+#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler destination image size (GSC_DSTIMG_SIZE, offset 0x40) */
+#define GSC_DSTIMG_SIZE 0x40
+#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
+#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
+#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
+#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
+
+/* G-Scaler destination image offset (GSC_DSTIMG_OFFSET, offset 0x44): write origin within the destination image */
+#define GSC_DSTIMG_OFFSET 0x44
+#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
+#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
+#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
+#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
+
+/* G-Scaler output chrominance stride (GSC_OUT_CHROM_STRIDE, offset 0x48) */
+#define GSC_OUT_CHROM_STRIDE 0x48
+#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
+#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
+
+/* G-Scaler input y address mask register (offset 0x4C), not a bitmask */
+#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
+/* G-Scaler input y base address — bank of registers, n presumably a buffer index; confirm valid range against datasheet */
+#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
+/* G-Scaler input y base current address */
+#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
+
+/* G-Scaler input cb address mask register (offset 0x7C) */
+#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
+/* G-Scaler input cb base address, n = buffer index */
+#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
+/* G-Scaler input cb base current address */
+#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
+
+/* G-Scaler input cr address mask register (offset 0xAC) */
+#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
+/* G-Scaler input cr base address, n = buffer index */
+#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
+/* G-Scaler input cr base current address */
+#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
+
+/* G-Scaler input address mask: bit fields shared by the *_MASK registers above */
+#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
+
+/* G-Scaler output y address mask register (offset 0x10C) */
+#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
+/* G-Scaler output y base address, n = buffer index */
+#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
+
+/* G-Scaler output cb address mask register (offset 0x15C) */
+#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
+/* G-Scaler output cb base address, n = buffer index */
+#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
+
+/* G-Scaler output cr address mask register (offset 0x1AC) */
+#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
+/* G-Scaler output cr base address, n = buffer index */
+#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
+
+/* G-Scaler output address mask: bit fields shared by the OUT *_MASK registers; note 16-bit mask vs 8-bit on input */
+#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
+#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
+#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
+#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
+
+/* G-Scaler horizontal scaling filter coefficient LUT; (n, s, x) presumably index phase/tap/plane — confirm against datasheet */
+#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler vertical scaling filter coefficient LUT; same parameter layout as GSC_HCOEF */
+#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
+
+/* G-Scaler BUS control (GSC_BUSCON, offset 0xA78): interrupt timing and AXI cache attributes */
+#define GSC_BUSCON 0xA78
+#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
+#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
+#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
+#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
+#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
+
+/* G-Scaler V position (GSC_VPOSITION, offset 0xA7C) */
+#define GSC_VPOSITION 0xA7C
+#define GSC_VPOS_F(x) ((x) << 0)
+
+
+/* G-Scaler clock initial count (GSC_CLK_INIT_COUNT, offset 0xC00): clock-gating mode initial counter */
+#define GSC_CLK_INIT_COUNT 0xC00
+#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
+
+/* G-Scaler clock snoop count (GSC_CLK_SNOOP_COUNT, offset 0xC04): clock-gating mode snoop counter */
+#define GSC_CLK_SNOOP_COUNT 0xC04
+#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
+
+#endif /* REGS_GSC_H_ */